diff --git "a/1441.jsonl" "b/1441.jsonl" new file mode 100644--- /dev/null +++ "b/1441.jsonl" @@ -0,0 +1,633 @@ +{"seq_id":"29407675639","text":"#!/usr/bin/env python\n\n\n\"\"\"\nAWS CloudFormation deployment script\n\nCOMMAND is either 'CREATE', 'UPDATE', or 'DELETE'.\n\nCONFIG_FILE contains the infrastructure stack details in\nthe following JSON format:\n\n{\n \"name\": \"STACK_NAME\",\n \"region\": \"REGION\",\n \"template\": \"CLOUDFORMATION_TEMPLATE_FILE.yml\",\n \"parameters\": \"CLOUDFORMATION_PARAMETERS_FILE.json\"\n}\n\"\"\"\n\n\nimport click\nimport json\nimport os\n\n\ndef deploy_stack(config_file, command):\n \"\"\"deploy infrastructure as specified in CloudFormation template\"\"\"\n try:\n with open(config_file) as config:\n stack = json.load(config)\n except (FileNotFoundError, IsADirectoryError, PermissionError) as err:\n click.echo(f\"ERROR: {err}\")\n else:\n exec(f\"{command}_stack(**stack)\")\n\n\ndef create_stack(name, template, parameters, region):\n \"\"\"create stack as specified in CloudFormation template\"\"\"\n os.system(\n f\"aws cloudformation create-stack\"\n f\" --stack-name {name}\"\n f\" --template-body file://{template}\"\n f\" --parameters file://{parameters}\"\n f' --capabilities \"CAPABILITY_IAM\" \"CAPABILITY_NAMED_IAM\"'\n f\" --region={region}\"\n )\n\n\ndef update_stack(name, template, parameters, region):\n \"\"\"update stack as specified in CloudFormation template\"\"\"\n os.system(\n f\"aws cloudformation update-stack\"\n f\" --stack-name {name}\"\n f\" --template-body file://{template}\"\n f\" --parameters file://{parameters}\"\n f' --capabilities \"CAPABILITY_IAM\" \"CAPABILITY_NAMED_IAM\"'\n f\" --region={region}\"\n )\n\n\ndef delete_stack(name, **kwargs):\n \"\"\"delete specified stack\"\"\"\n os.system(f\"aws cloudformation delete-stack\" f\" --stack-name {name}\")\n\n\n@click.command()\n@click.argument(\"config-file\")\n@click.option(\n \"-c\",\n \"--command\",\n required=True,\n default=\"update\",\n type=click.Choice([\"create\", \"update\", \"delete\"], case_sensitive=False),\n)\ndef cli(*, config_file, command):\n \"\"\"Deploy infrastructure specified in CONFIG_FILE\n\n CONFIG_FILE contains the infrastructure stack details in JSON format.\n \"\"\"\n deploy_stack(config_file, command)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"davidsimowitz/high-availability-cloudformation-deployment","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"23617278423","text":"import requests\r\nfrom datetime import datetime\r\nimport os\r\n\r\nAPP_ID = os.environ[\"APP_ID\"]\r\nAPI_KEY = os.environ[\"API_KEY\"]\r\nSHEET_ENDPOINT = os.environ[\"SHEET_ENDPOINT\"]\r\nTOKEN = os.environ[\"TOKEN\"]\r\n\r\nheaders = {\r\n \"x-app-id\": APP_ID,\r\n \"x-app-key\": API_KEY\r\n}\r\n\r\nexercise_endpoint = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\r\n\r\n#exercise = input(\"Tell me which exercises you did:\")\r\nexercise = input(\"Tell me which exercises you did: \")\r\n\r\nrequest_body = {\r\n \"query\": exercise,\r\n \"gender\": \"male\",\r\n \"weight_kg\": 75.5,\r\n \"height_cm\": 176,\r\n \"age\": 25\r\n}\r\n\r\nrequest = requests.post(exercise_endpoint, json=request_body, headers=headers)\r\nrequest.raise_for_status()\r\n\r\nexercise_json = request.json()[\"exercises\"]\r\n\r\ndatetime_now = datetime.now()\r\ndate_now = datetime_now.strftime(\"%d/%m/%Y\")\r\ntime_now = 
datetime_now.strftime(\"%X\")\r\n\r\nfor exercise in exercise_json:\r\n post_params = {\r\n \"workout\": {\r\n \"date\": date_now,\r\n \"time\": time_now,\r\n \"exercise\": exercise[\"user_input\"],\r\n \"duration\": exercise[\"duration_min\"],\r\n \"calories\": exercise[\"nf_calories\"]\r\n }\r\n }\r\n\r\n workout_url = SHEET_ENDPOINT\r\n sheety_headers = {\"Authorization\": TOKEN}\r\n\r\n post_response = requests.post(url=workout_url, json=post_params, headers=sheety_headers)\r\n post_response.raise_for_status()\r\n\r\n print(post_response.text)\r\n","repo_name":"EricKurachi/udemy-100-days-python-bootcamp","sub_path":"workout-tracking/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20933287947","text":"class VersionProperties:\n KEEP_OLD_VERSION = -1 # set to keep the old version\n RAISE_VERSION_BY_ONE = -2 # raise the version by one (if set in version name, will raise in a fraction)\n\n def __init__(self, new_version_code: int, new_version_name):\n \"\"\"\n Create this instance in order to set the desired version code and name for the new release.\n NOTICE: each of this properties can also hold VersionProperties.KEEP_OLD_VERSION or VersionProperties.RAISE_VERSION_BY_ONE.\n\n Args:\n new_version_code: the version code to be placed in the build.gradle for the new release\n new_version_name: the version name to be placed in the build.gradle for the new release\n \"\"\"\n self.new_version_code = new_version_code\n self.new_version_name = new_version_name\n","repo_name":"osfunapps/os-android-app-version-changer-py","sub_path":"os_android_app_version_changer/objs/VersionProperties.py","file_name":"VersionProperties.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"6314399485","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 11 13:21:09 2021\r\n\r\n@author: putnamm3196\r\n\"\"\"\r\n\r\nfrom Game import Game\r\nfrom Room import Room\r\nfrom Player import Player\r\nfrom Item import Item\r\n\r\n\r\nclass MyGame(Game):\r\n def setup(self):\r\n loader = MyGameLoader()\r\n self.rooms = loader.setup()\r\n self.here = self.rooms[\"Hub\"]\r\n self.here.describe()\r\n \r\n \r\nclass MyGameLoader: \r\n def setup(self):\r\n \"\"\" setup(): create a graph of rooms for play. 
\"\"\"\r\n # just a test -- needs work\r\n\r\n dungeonEntrance = Room( \"Dungeon Entrance\", \r\n \"\",\r\n { \"east\": \"Hub\",\r\n \"north\": \"Lobby\"} )\r\n dungeonLobby = Room( \"Lobby\",\r\n \"\",\r\n {\"south\": \"Dungeon Entrance\",\r\n \"down\": \"Basement\"})\r\n \r\n dungeonBasement = Room ( \"Basement\",\r\n \"\",\r\n {\"up\": \"Lobby\"})\r\n tournament = Room ( \"Tournament\",\r\n \"\",\r\n { \"north\" : \"Hub\"} )\r\n \r\n weaponShop = Room ( \"Weapon Shop\", \r\n \"\",\r\n { \"south\" : \"Hub\" } )\r\n \r\n \r\n bossArena = Room ( \"Boss Arena\",\r\n \"\",\r\n {\"west\" : \"Hub\"})\r\n hub = Room ( \"Hub\",\r\n \" \",\r\n {\"north\" : \"Weapon Shop\",\r\n \"south\" : \"Tournament\",\r\n \"east\" : \"Boss Arena\",\r\n \"west\": \"Dungeon Entrance\"})\r\n \r\n # Place rooms in a dictionary.\r\n\r\n rooms = {dungeonEntrance.name: dungeonEntrance,\r\n dungeonLobby.name: dungeonLobby,\r\n dungeonBasement.name: dungeonBasement,\r\n tournament.name: tournament,\r\n weaponShop.name: weaponShop,\r\n bossArena.name: bossArena,\r\n hub.name: hub}\r\n \r\n phone = Item(\"phone\",\"a mysterious device, probably should be thrown away.\")\r\n hub.addItem(phone)\r\n \r\n sword = Item(\"sword\",\"the best sword from the local blacksmith\")\r\n hub.addItem(sword)\r\n \r\n return rooms\r\n \r\ndef main():\r\n game = MyGame()\r\n game.setup()\r\n game.output(\"\"\"\r\nStarting game -- type help if needed -- enter command.\r\nWelcome to the start of your journey! Your goal is to train your\r\ncharacter to defeat the epic dragon. To start, you should go to the weaponsmith\r\nto get your sword.\"\"\")\r\n game.loop()\r\n game.end()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"PutnamM3196/CSC221","sub_path":"Game_Putnam/MyGame.py","file_name":"MyGame.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22106510006","text":"#!/usr/bin/python \n \nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.node import OVSSwitch, Controller, RemoteController\n\nclass DiamondTopoEqualWeight(Topo):\n \"\"\"\n Diamond topology connecting n nodes\n on one side with n nodes on the other such\n that there are two paths between the sides\n \n --------\n | |\n | |\n | s2 |\n -------- | | --------\n h === | p1 === p1 p2 === p2 | === h\n h === | | -------- | | === h\n h === | s1 | | s4 | === h\n h === | | -------- | | === h\n h === | p2 === p1 p2 === p1 | === h\n -------- | | --------\n | s3 |\n | |\n | | \n --------\n \n \"\"\"\n def build(self, leaves):\n switch1 = self.addSwitch('s1')\n switch2 = self.addSwitch('s2')\n switch3 = self.addSwitch('s3')\n switch4 = self.addSwitch('s4')\n\n self.addLink(switch1, switch2)\n self.addLink(switch1, switch3)\n\n self.addLink(switch4, switch3)\n self.addLink(switch4, switch2)\n\n for h in range(leaves):\n host = self.addHost('h%s' % (h + 1))\n self.addLink(host, switch1)\n\n for h in range(leaves):\n host = self.addHost('h%s' % (h + leaves + 1))\n self.addLink(host, switch4)\n\ntopos = {'diamond-equal' : DiamondTopoEqualWeight}\n","repo_name":"Ipiano/network-optimizer","sub_path":"mininet_ext/diamond.py","file_name":"diamond.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"9419751773","text":"def cycle(fish: list[int]) -> list[int]:\n new_l = 
[]\n\n for f in fish:\n f -= 1\n if f == -1:\n new_l.extend([6, 8])\n else:\n new_l.append(f)\n return new_l\n\n\ndef compute(data):\n \"\"\"\n >>> compute(\"3,4,3,1,2\")\n 5934\n \"\"\"\n fish = [int(n) for n in data.split(\",\")]\n\n for _ in range(80):\n fish = cycle(fish)\n\n return len(fish)\n\n\ndef main():\n import pathlib\n\n input_path = pathlib.Path(__file__).with_name(\"input.txt\")\n\n with input_path.open() as f:\n print(compute(f.read()))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"woranov/aoc2021","sub_path":"day06/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20076466449","text":"from google.cloud import ndb\nfrom flask import Flask, request\nfrom os import environ\nimport logging\n\n\nclass Room(ndb.Model):\n name = ndb.StringProperty()\n capacity = ndb.IntegerProperty()\n schedule = ndb.StringProperty()\n takers = ndb.StringProperty()\n\n\nclient = ndb.Client()\n\n\n# Middleware to obtain new client context for each request. This code borrowed from Google\n# at https://cloud.google.com/appengine/docs/standard/python3/migrating-to-cloud-ndb\ndef ndb_wsgi_middleware(wsgi_app):\n def middleware(environ, start_response):\n with client.context():\n return wsgi_app(environ, start_response)\n\n return middleware\n\n\napp = Flask(__name__)\n# Wrap app in middleware\napp.wsgi_app = ndb_wsgi_middleware(app.wsgi_app)\n\n\n@app.route('/list')\ndef list_rooms():\n rooms = [\n {\n 'name': r.name,\n 'capacity': r.capacity,\n 'schedule': r.schedule,\n 'takers': r.takers,\n 'key': r.key.urlsafe().decode('utf-8')\n } for r in Room.query().order(Room.name)]\n\n resp = ''\n for r in rooms:\n resp += '|'.join([r['key'], r['name'], str(r['capacity']), r['schedule'], r['takers']]) + ';'\n\n return resp\n\n\n@app.route('/save', methods=['POST'])\ndef save_room():\n params = request.get_json(force=True)\n name = params['name']\n capacity = params['capacity']\n schedule = params['schedule']\n takers = params['takers']\n\n room = Room(name=name, capacity=capacity, schedule=schedule, takers=takers)\n key = room.put()\n return 'roomkey=' + key.urlsafe().decode('utf-8')\n\n\n@app.route('/update', methods=['POST'])\n@ndb.transactional(retries=0)\ndef reserve_room():\n params = request.get_json(force=True)\n roomkey = params['roomkey']\n schedule = params['schedule']\n taker = params['taker']\n\n try:\n room = ndb.Key(urlsafe=roomkey).get()\n\n allok = True\n for idx, slot in enumerate(schedule):\n if slot == '1':\n if room.schedule[idx] == '0':\n x = list(room.schedule)\n x[idx] = '1'\n room.schedule = ''.join(x)\n\n x = room.takers.split(':')\n logging.debug('PJS: idx={0}, x length={1}'.format(idx, len(x)))\n x[idx] = taker\n room.takers = ':'.join(x)\n else:\n allok = False\n break\n\n if allok:\n room.put()\n resp = 'roomkey=' + roomkey\n else:\n resp = 'error=Room not free at requested times'\n except Exception as ex:\n resp = f'error=Could not reserve room for requested times; {ex}'\n\n return resp\n\n\n@app.route('/purge')\ndef purge_all():\n keys = [r.key for r in Room.query()]\n\n try:\n ndb.delete_multi(keys)\n resp = 'All rooms deleted'\n except Exception as ex:\n resp = f'Error purging rooms: {ex}'\n\n return resp\n","repo_name":"psterpe/VBARoomScheduler","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"30781485474","text":"#coding:utf-8\n\n\nfrom apptest.PO import Superunit\nfrom apptest.PO import MinePage\nfrom apptest.Public.getmethodname import GetName\nfrom apptest.PO import NewsPage\nfrom apptest.PO import BasePage\nfrom apptest.PO import InsideNewsPage\nfrom apptest.Public.screenshot import screenshot\nfrom apptest.Public.sendemail import smail\nimport os ,time\nfrom apptest.PO import LoginPage\nfrom apptest.PO import TaskCenterPage\nfrom apptest.PO import PursePage\nfrom apptest.PO import InsideNewsPage\nclass zanANDword(Superunit.initunit):\n work_path=os.path.dirname(os.getcwd())+\"\\\\screen\\\\mine\\\\\"+time.strftime('%Y-%m-%d',time.localtime(time.time()))+\"\\\\\"\n\n def test_01_zanword(self):\n name1=self.__class__.__name__+'.'+GetName.get_current_function_name()\n times=[]\n MinePage.Mine(self.driver).click_mine_entry()\n LoginPage.Login(self.driver).input_nologin()\n try:\n self.assertTrue(MinePage.Mine(self.driver).find_logintip())\n self.assertTrue(LoginPage.Login(self.driver).find_tvcommand())\n self.assertTrue(LoginPage.Login(self.driver).find_tvtitle())\n self.assertTrue(LoginPage.Login(self.driver).find_tvtime())\n except:\n print (u\"个人中心>点赞和评论-界面显示异常\")\n filename=screenshot().screencap(self.work_path,self.driver,name=name1)\n body=u\"个人中心>点赞和评论-界面显示异常\"\n smail().send_errormsg(str(filename),body,self.work_path)\n else:\n BasePage.Base(self.driver).do_swipe(self.driver,\"up\")\n for i in LoginPage.Login(self.driver).find_tvtime():\n b=time.mktime(time.strptime(\"2016-\"+i.text+\":00\",'%Y-%m-%d %H:%M:%S'))\n times.append(int(b))\n if times[2]:\n if times[0]>=times[1]>=times[2]:\n pass\n else:\n print (u\"个人中心>点赞和评论-时间顺序显示异常\")\n filename=screenshot().screencap(self.work_path,self.driver,name=name1)\n body1=u\"个人中心>点赞和评论-时间顺序显示异常\"\n smail().send_errormsg(str(filename),body1,self.work_path)\n LoginPage.Login(self.driver).click_tvtile()\n try:\n self.assertTrue(InsideNewsPage.Inside(self.driver).find_configloc())\n self.assertFalse(LoginPage.Login(self.driver).find_tvcommand())\n except:\n print (u\"个人中心>点赞和评论-跳转到内页显示异常\")\n filename=screenshot().screencap(self.work_path,self.driver,name=name1)\n body2=u\"个人中心>点赞和评论-跳转到内页显示异常\"\n smail().send_errormsg(str(filename),body2,self.work_path)\n else:\n self.driver.back()\n LoginPage.Login(self.driver).click_praise()\n try:\n self.assertTrue(LoginPage.Login(self.driver).find_praisenum())\n except:\n print (u\"个人中心>点赞和评论-跳转到内页显示异常\")\n filename=screenshot().screencap(self.work_path,self.driver,name=name1)\n body3=u\"个人中心>点赞和评论-跳转到内页显示异常\"\n smail().send_errormsg(str(filename),body3,self.work_path)\n","repo_name":"xiaominwanglast/uiautomator","sub_path":"android_warning/android_warning/apptest/TestCase/mine/zanandword.py","file_name":"zanandword.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32933913339","text":"\n\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\n\nfrom kivy.lang import Builder\nfrom kivy.base import runTouchApp\n\n\nBuilder.load_string('''\n\n:\n text: 'THE BACKGROUND'\n font_size: 150\n Image:\n source: 'colours.png'\n allow_stretch: True\n keep_ratio: False\n Image:\n source: 'colours2.png'\n allow_stretch: True\n keep_ratio: False\n Image:\n source: 'colours.png'\n allow_stretch: True\n keep_ratio: False\n''')\n\n\nclass RootWidget(Label):\n\n def do_layout(self, *args):\n number_of_children = len(self.children)\n width = self.width\n width_per_child = width / 
number_of_children\n\n positions = range(0, width, width_per_child)\n for position, child in zip(positions, self.children):\n child.height = self.height\n child.x = self.x + position\n child.y = self.y\n child.width = width_per_child\n\n def on_size(self, *args):\n self.do_layout()\n\n def on_pos(self, *args):\n self.do_layout()\n\n def add_widget(self, widget):\n super(RootWidget, self).add_widget(widget)\n self.do_layout()\n\n def remove_widget(self, widget):\n super(RootWidget, self).remove_widget(widget)\n self.do_layout()\n\nrunTouchApp(RootWidget())\n","repo_name":"inclement/kivycrashcourse","sub_path":"video10-thinking_about_layout/after.py","file_name":"after.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"47"} +{"seq_id":"4453010948","text":"import re\n\nfrom pathlib import Path\nfrom cohortextractor import patients, codelist\n\nBASE_DIR = Path(__file__).parents[1]\nOUTPUT_DIR = BASE_DIR / \"../output\"\nANALYSIS_DIR = BASE_DIR / \"../analysis\"\n\n\ndef generate_expectations_codes(codelist, incidence=0.5):\n expectations = {str(x): (1 - incidence) / len(codelist) for x in codelist}\n expectations[None] = incidence\n return expectations\n\n\ndef loop_over_codes(numeric, question_str, code_list):\n def make_variable(code):\n return {\n f\"flucats_question_{question_str}_{code}\": patients.with_these_clinical_events(\n codelist([code], system=\"snomed\"),\n between=[\"flucats_template_date\", \"flucats_template_date + 1 day\"],\n returning=\"binary_flag\",\n find_last_match_in_period=True,\n return_expectations={\n \"category\": {\"ratios\": generate_expectations_codes([code])}\n },\n )\n }\n\n def make_variable_numeric(code):\n return {\n f\"flucats_question_numeric_value_{question_str}_{code}_value\": patients.with_these_clinical_events(\n codelist([code], system=\"snomed\"),\n between=[\"flucats_template_date\", \"flucats_template_date + 1 day\"],\n returning=\"numeric_value\",\n find_last_match_in_period=True,\n return_expectations={\n \"float\": {\"distribution\": \"normal\", \"mean\": 45.0, \"stddev\": 20},\n \"incidence\": 0.5,\n },\n ),\n f\"flucats_question_numeric_value_{question_str}_{code}\": patients.with_these_clinical_events(\n codelist([code], system=\"snomed\"),\n between=[\"flucats_template_date\", \"flucats_template_date + 1 day\"],\n returning=\"binary_flag\",\n find_last_match_in_period=True,\n return_expectations={\n \"category\": {\"ratios\": generate_expectations_codes([code])}\n },\n ),\n }\n\n variables = {}\n\n if numeric:\n for code in code_list:\n variables.update(make_variable_numeric(code))\n\n else:\n for code in code_list:\n variables.update(make_variable(code))\n return variables\n\n\ndef match_input_files(file: str) -> bool:\n pattern = (\n r\"^input_([a-zA-Z]+\\_)*20\\d\\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])\\.csv\"\n )\n return bool(re.match(pattern, file))\n\n\ndef get_date_input_file(file: str) -> str:\n \"\"\"\n Gets the date in format YYYY-MM-DD from input file name string\n \"\"\"\n\n if not match_input_files(file):\n raise Exception(\"Not valid input file format\")\n date = re.search(r\"(\\d{4}-\\d{2}-\\d{2})\", file)\n return date.group(1)\n","repo_name":"opensafely/FLUCATS","sub_path":"analysis/analysis/study_utils.py","file_name":"study_utils.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22672566622","text":"# -*- coding: utf-8 
-*-\r\n\r\n# http://www.somascape.org/midi/tech/mfile.html\r\n# http://www.music.mcgill.ca/~ich/classes/mumt306/StandardMIDIfileformat.html\r\n# http://www.music-software-development.com/midi-tutorial.html\r\n# https://www.csie.ntu.edu.tw/~r92092/ref/midi/\r\n# http://midi.teragonaudio.com/tech/midispec.htm\r\n# http://midi.teragonaudio.com/tech/midispec/run.htm\r\n# http://www.recordingblogs.com/sa/Wiki/topic/Musical-Instrument-Digital-Interface-MIDI\r\n\r\nfrom mxm.midifile.src import constants as c\r\nfrom mxm.midifile.src.event_dispatcher import EventDispatcher\r\n\r\nclass OutTest:\r\n def header(self, format, nTracks, division):\r\n print (format, nTracks, division)\r\n def reset_running_status(self):\r\n self._running_status = None\r\n def set_running_status(self, status):\r\n self._running_status = status\r\n def reset_time(self):\r\n self.time = 0\r\n def set_current_track(self, n):\r\n self.current_track = n\r\n def start_of_track(self, current_track):\r\n print ('start_of_track: %s' % current_track)\r\n def update_time(self, new_time=0, relative=1):\r\n print('update_time: %s' % new_time)\r\n def note_on(self, channel, note, velocity, use_running_status=False):\r\n print ('note_on', channel, note, velocity, use_running_status)\r\n def note_off(self, channel, note, velocity, use_running_status=False):\r\n print ('note_off', channel, note, velocity, use_running_status)\r\n def eof(self):\r\n print('eof')\r\n\r\nclass MidiFileParser:\r\n \"\"\"\r\n The MidiFileParser is the lowest level parser that see the data as \r\n midi data. It generates events that gets triggered on the MidiEvents handler.\r\n >>> import io\r\n >>> from mxm.midifile import RawInstreamFile\r\n >>> from mxm.midifile.src.data_type_converters import writeBew, writeVar\r\n\r\n First we set up the header data for a simple midi file with 1 track\r\n >>> f = io.BytesIO()\r\n >>> mthd = b'MThd'\r\n >>> header_chunk_size = writeBew(6, 4)\r\n >>> format = writeBew(0, 2) # Format 0\r\n >>> nTracks = writeBew(1, 2) # One track\r\n >>> division = writeBew(480, 2) # \r\n >>> mthd, list(header_chunk_size), list(format), list(nTracks), list(division)\r\n (b'MThd', [0, 0, 0, 6], [0, 0], [0, 1], [1, 224])\r\n >>> (f.write(mthd), f.write(header_chunk_size), f.write(format), f.write(nTracks), f.write(division))\r\n (4, 4, 2, 2, 2)\r\n\r\n Then we set up some track data\r\n >>> tracklength = 0\r\n >>> start_time = bytes(writeVar(0))\r\n >>> tracklength += len(start_time)\r\n >>> note_on = bytes([0x90, 64, 64])\r\n >>> tracklength += len(note_on)\r\n >>> note_off_time = bytes(writeVar(96))\r\n >>> tracklength += len(note_off_time)\r\n >>> note_off = bytes([0x80, 64, 64])\r\n >>> tracklength += len(note_off)\r\n >>> (f.write(b'MTrk'), f.write(writeBew(tracklength, 4)), f.write(start_time), f.write(note_on), f.write(note_off_time), f.write(note_off))\r\n (4, 4, 1, 3, 1, 3)\r\n\r\n And finally we parse the data, first the metadata\r\n >>> r_in = RawInstreamFile(f)\r\n >>> p = MidiFileParser(r_in, OutTest())\r\n >>> p.parseMThdChunk()\r\n 0 1 480\r\n \r\n then the tracks\r\n >>> p.parseMTrkChunks()\r\n start_of_track: 0\r\n update_time: 0\r\n note_on 0 64 64 False\r\n update_time: 96\r\n note_off 0 64 64 False\r\n eof\r\n \"\"\"\r\n\r\n def __init__(self, raw_in, event_handler):\r\n \"\"\"\r\n raw_data is the raw content of a midi file as bytes.\r\n \"\"\"\r\n # internal values, don't mess with 'em directly\r\n self.raw_in = raw_in\r\n self.dispatch = EventDispatcher(event_handler)\r\n # running status is only implemented for Voice 
Category messages (ie, Status is 0x80 to 0xEF).\r\n self.reset_running_status()\r\n\r\n def reset_running_status(self):\r\n self._running_status = None\r\n self._use_running_status = False\r\n self.dispatch.reset_running_status()\r\n \r\n def set_running_status(self, status):\r\n self._running_status = status\r\n self.dispatch.set_running_status(status)\r\n\r\n def get_running_status(self):\r\n return self._running_status\r\n\r\n\r\n def parseMThdChunk(self):\r\n \"\"\"\r\n Parses the header chunk\r\n \"\"\"\r\n raw_in = self.raw_in\r\n header_chunk_type = raw_in.nextSlice(4)\r\n header_chunk_size = raw_in.readBew(4)\r\n # check if it is a proper midi file\r\n if bytes(header_chunk_type) != b'MThd':\r\n raise TypeError(\"ERROR: It is not a valid midi file!\")\r\n # Header values are at fixed locations, so no reason to be clever\r\n self.format = raw_in.readBew(2)\r\n self.nTracks = raw_in.readBew(2)\r\n self.division = raw_in.readBew(2)\r\n # Theoretically a header larger than 6 bytes can exist\r\n # but no one has seen one in the wild\r\n # But correctly ignore unknown data if it is though\r\n if header_chunk_size > 6:\r\n raw_in.moveCursor(header_chunk_size-6)\r\n # call the header event handler on the stream\r\n self.dispatch.header(self.format, self.nTracks, self.division)\r\n\r\n\r\n def parseMTrkChunks(self):\r\n \"Parses all track chunks.\"\r\n for t in range(self.nTracks):\r\n self._current_track = t\r\n self.parseMTrkChunk() # this is where it's at!\r\n self.dispatch.eof()\r\n\r\n\r\n def parseMTrkChunk(self):\r\n \"Parses a track chunk. This is the most important part of the parser.\"\r\n # set time to 0 at start of a track\r\n self.dispatch.reset_time()\r\n dispatch = self.dispatch\r\n raw_in = self.raw_in\r\n# print('raw_in.data[raw_in.getCursor():raw_in.getCursor()+20] %s' % raw_in.data[raw_in.getCursor():raw_in.getCursor()+20] )\r\n# print('raw_in.data[raw_in.getCursor():raw_in.getCursor()+20] %s' % list(raw_in.data[raw_in.getCursor():raw_in.getCursor()+20]) )\r\n # Trigger event at the start of a track\r\n dispatch.start_of_track(self._current_track)\r\n # position cursor after track header\r\n raw_in.moveCursor(4)\r\n # unsigned long is 4 bytes\r\n tracklength = raw_in.readBew(4)\r\n# print ('tracklength: %s' % tracklength)\r\n track_endposition = raw_in.getCursor() + tracklength # absolute position!\r\n\r\n# print(raw_in.data[raw_in.getCursor():raw_in.getCursor()+20]) \r\n# print(list(raw_in.data[raw_in.getCursor():raw_in.getCursor()+20]))\r\n\r\n while raw_in.getCursor() < track_endposition:\r\n \r\n # find relative time of the event\r\n time = raw_in.readVarLen()\r\n dispatch.update_time(time)\r\n \r\n # running status is only implemented for Voice Category\r\n # messages (ie, Status is 0x80 to 0xEF).\r\n peak_ahead = raw_in.readBew(move_cursor=0)\r\n# print('peak_ahead: %s' % peak_ahead)\r\n if (peak_ahead & 0b10000000): \r\n # the status byte has the high bit set, so it\r\n # was not running data but proper status byte\r\n status = raw_in.readBew()\r\n self.set_running_status(status)\r\n self._use_running_status = False\r\n else:\r\n # use that darn running status\r\n status = self.get_running_status()\r\n self._use_running_status = True\r\n\r\n # while I am almost certain that no realtime \r\n # messages will pop up in a midi file, I might need to \r\n # change my mind later.\r\n\r\n # we need to look at nibbles here\r\n# print('='*80)\r\n# print (status)\r\n hi_nible, lo_nible = (status & 0xF0) >> 4, status & 0x0F\r\n if hi_nible == 0xF:\r\n 
self.reset_running_status()\r\n \r\n # match up with events\r\n\r\n # Is it a meta_event ??\r\n # these only exists in midi files, not in transmitted midi data\r\n # In transmitted data META_EVENT (0xFF) is a system reset\r\n if status == c.META_EVENT:\r\n meta_type = raw_in.readBew()\r\n meta_length = raw_in.readVarLen()\r\n meta_data = raw_in.nextSlice(meta_length)\r\n dispatch.meta_event(meta_type, meta_data)\r\n\r\n\r\n # Is it a sysex_event ??\r\n elif status == c.SYSTEM_EXCLUSIVE:\r\n # ignore sysex events\r\n sysex_length = raw_in.readVarLen()\r\n # don't read sysex terminator\r\n sysex_data = raw_in.nextSlice(sysex_length-1)\r\n # only read last data byte if it is a sysex terminator\r\n # It should allways be there, but better safe than sorry\r\n if raw_in.readBew(move_cursor=0) == c.END_OFF_EXCLUSIVE:\r\n eo_sysex = raw_in.readBew()\r\n dispatch.sysex_event(sysex_data)\r\n # the sysex code has not been properly tested, and might be fishy!\r\n\r\n\r\n # is it a system common event?\r\n elif hi_nible == 0xF0: # Hi bits are set then\r\n data_sizes = {\r\n c.MTC:1,\r\n c.SONG_POSITION_POINTER:2,\r\n c.SONG_SELECT:1,\r\n }\r\n data_size = data_sizes.get(hi_nible, 0)\r\n common_data = raw_in.nextSlice(data_size)\r\n common_type = lo_nible\r\n dispatch.system_common(common_type, common_data)\r\n \r\n\r\n # Oh! Then it must be a midi event (channel voice message)\r\n else:\r\n data_sizes = {\r\n c.PATCH_CHANGE:1,\r\n c.CHANNEL_PRESSURE:1,\r\n c.NOTE_OFF:2,\r\n c.NOTE_ON:2,\r\n c.AFTERTOUCH:2,\r\n c.CONTINUOUS_CONTROLLER:2,\r\n c.PITCH_BEND:2,\r\n }\r\n data_size = data_sizes.get(hi_nible, 0)\r\n channel_data = raw_in.nextSlice(data_size)\r\n event_type, channel = hi_nible, lo_nible\r\n# print ((event_type, channel, channel_data, self._use_running_status))\r\n dispatch.channel_message(event_type, channel, channel_data, self._use_running_status)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n import doctest\r\n doctest.testmod() # run test on inline examples first\r\n\r\n # # get data\r\n # test_file = '/home/maxm/instances/midienv/mxm.midifile-1.0/mxm/midifile/tests/midifiles/midiout-0001.mid'\r\n\r\n # # do parsing\r\n # from mxm.midifile import MidiToCode\r\n # from mxm.midifile import RawInstreamFile\r\n\r\n # midi_in = MidiFileParser(RawInstreamFile(test_file), MidiToCode())\r\n # midi_in.parseMThdChunk()\r\n # midi_in.parseMTrkChunks()\r\n \r\n","repo_name":"maxmcorp/mxm.midifile","sub_path":"mxm/midifile/src/midi_file_parser.py","file_name":"midi_file_parser.py","file_ext":"py","file_size_in_byte":10748,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"47"} +{"seq_id":"28890628139","text":"import gc\nfrom math import floor \nfrom random import uniform, choice\nfrom Beagle import API as BGL\nfrom Newfoundland.Camera import Camera\nfrom Newfoundland.Controllers import Controllers\nfrom Newfoundland.Player import Player\nfrom Newfoundland.BaseGame import BaseGame\n\nfrom .DungeonFloor import DungeonFloor\nfrom .DungeonCamera import DungeonCamera\nfrom .KPlayer import KPlayer\nfrom .KTState import KTState\n\nfrom .Universe.AreaLoader import get_area_data\nfrom .Universe.AreaCompiler import AreaCompiler\nfrom .GeneratorOptions import GeneratorOptions\n\nfrom .Background import Background\nfrom .CloudBackground import CloudBackground\nfrom .Fog import Fog\n\nfrom .Abilities import Abilities\nfrom .Renderers.uniform_fade import uniform_fade \n\nfrom .Sequences import Sequences\nfrom .ParallaxBackground import ParallaxBackground\nfrom 
.Universe.LevelEffects.AttackInfo import AttackInfo\n\nfrom .Menu.Menu import Menu\nfrom .Menu.SummaryPage import SummaryPage\n\nPREBUFFER = 0\nPREBUFFER_R = 0\n\nclass Game( BaseGame ):\n\n god_buffer = BGL.framebuffer.from_screen()\n god_shader = BGL.assets.get(\"KT-compositor/shader/god\")\n\n paused = False\n area_name = None\n main_menu = True\n ###############\n\n def trigger_cinematic(self,key):\n if key == \"warp\":\n from .Cinematics.WarpCinematic import WarpCinematic\n self.active_cinematic = WarpCinematic()\n self.active_cinematic.game = self\n elif key == \"intro\":\n from .Cinematics.IntroCinematic import IntroCinematic\n self.active_cinematic = IntroCinematic()\n self.active_cinematic.game = self\n elif key == \"beard\":\n from .Cinematics.BeardCinematic import BeardCinematic\n self.active_cinematic = BeardCinematic()\n self.active_cinematic.game = self\n\n def build_area_test(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/area_test\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n def build_area_tower(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/tower\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n def build_area_doortest(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/doortest\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( music=BGL.assets.get('KT-player/path/polydrone'),\n\n god_shader = BGL.assets.get(\"KT-compositor/shader/ship_god\"),\ntilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n def build_area_arena(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/arena\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n def build_area_docks(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/docks\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( tilescale =2,\n god_shader = BGL.assets.get(\"KT-compositor/shader/ship_god\"),\n\n width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.using_tilemap = False\n floor.game = self\n return floor\n\n def build_area_oort_cloud(self):\n\n #GeneratorOptions.TreeTopTextures = [\n # BGL.assets.get(\"KT-forest/texture/crystal_1\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_2\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_3\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_4\")\n #]\n\n #GeneratorOptions.TreeShadowTextures = GeneratorOptions.TreeTopTextures\n\n area_raw = BGL.assets.get(\"KT-forest/textfile/oort_cloud\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( \n title = \"The Oort Cloud\",\n god_shader = BGL.assets.get(\"KT-compositor/shader/oort_god\"),\n fog_level_base=0.5, tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = 
self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n def build_area_crystals1(self):\n\n #GeneratorOptions.TreeTopTextures = [\n # BGL.assets.get(\"KT-forest/texture/crystal_1\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_2\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_3\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_4\")\n #]\n\n #GeneratorOptions.TreeShadowTextures = GeneratorOptions.TreeTopTextures\n\n area_raw = BGL.assets.get(\"KT-forest/textfile/crystals1\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( \n title = \"Crystaline Structure I.\",\n god_shader = BGL.assets.get(\"KT-compositor/shader/ship\"),\n fog_level_base=0.5, tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n floor.music = BGL.assets.get(\"KT-player/path/lacuna_canal\")\n\n floor.vision_mute = 0.7\n floor.bg_texture = BGL.assets.get(\"KT-forest/texture/lightmap\")\n floor.sky_texture = BGL.assets.get(\"KT-forest/texture/background\")\n floor.parallax_sky = 0.005\n floor.parallax_bg = 0.01\n\n return floor\n\n def build_area_ship_type(self, key):\n # a generic level template with good settings for smaller areas without a lot of lighting requirements \n #GeneratorOptions.TreeTopTextures = [\n # BGL.assets.get(\"KT-forest/texture/crystal_1\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_2\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_3\"),\n # BGL.assets.get(\"KT-forest/texture/crystal_4\")\n #]\n\n #GeneratorOptions.TreeShadowTextures = GeneratorOptions.TreeTopTextures\n\n area_raw = BGL.assets.get(\"KT-forest/textfile/\"+key)\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( \n music = BGL.assets.get(\"KT-player/path/ship_music\"),\n title = \"The Xeoliex\",\n god_shader = BGL.assets.get(\"KT-compositor/shader/ship_god\"),\n fuzz_amt = 0.8,\n fog_level_base=0.5, \n tilescale =2, \n uses_vision=True, \n fade_vision_amt=0.62, \n width = area_def[\"width\"]*2, \n height = area_def[\"height\"]*2, \n camera = self.camera, \n player = self.player, \n objects = [], \n area_def = area_def, \n vision_mute = 0.7,\n renderer_config = { \n \"vision_lightmap_width\" : 960,\n \"vision_lightmap_height\" : 540,\n \"photon_map_width\" : 1024,\n \"photon_map_height\" : 1024,\n \"static_lightmap_width\" : 1024,\n \"static_lightmap_height\" : 1024,\n \"dynamic_lightmap_width\" : 960,\n \"dynamic_lightmap_height\" : 540,\n \"photon_mapper_config\" : {\n 'stream' : True,\n 'photon_radius' :70.0,\n 'photon_emitter_power' : 0.01,\n 'photon_decay' : 0.9,\n 'photon_decay_jitter' : 0.4,\n 'photon_max_bounces' : 9,\n 'num_photons' : 8,\n 'photon_observe_chance' : 0.8\n },\n }\n )\n\n floor.game = self\n return floor\n\n def build_area_ship(self):\n return self.build_area_ship_type(\"ship\")\n\n def build_area_lacuna_canal(self):\n floor = self.build_area_ship_type(\"lacuna_canal\")\n floor.title = \"Unknown Origins\"\n floor.uses_vision = True\n\n floor.music = BGL.assets.get(\"KT-player/path/lacuna_canal\")\n floor.sky_texture = BGL.assets.get(\"KT-forest/texture/starfield1\")\n floor.bg_texture = BGL.assets.get(\"KT-forest/texture/nebula\")\n floor.bg_mode = \"add\"\n floor.parallax_sky = 0.01\n floor.parallax_bg = 0.04\n floor.fog_level_base = 0.1\n floor.fuzz_amt = 0.3\n floor.vision_mute = 0.6\n return floor\n\n def build_area_chase(self):\n floor = self.build_area_ship_type(\"chase\")\n floor.title = 
\"...\"\n floor.uses_vision = True\n\n floor.music = None\n floor.sky_texture = BGL.assets.get(\"KT-forest/texture/electrotrash\")\n floor.bg_texture = BGL.assets.get(\"KT-forest/texture/electrofore\")\n floor.parallax_sky = 0.01\n floor.parallax_bg = 0.04\n floor.fog_level_base = 1.0 #investigate\n\n floor.uses_vision = True\n return floor\n\n def build_area_platform(self):\n floor = self.build_area_ship_type(\"platform\")\n floor.title = \"...\"\n floor.uses_vision = True\n\n floor.music = None\n floor.sky_texture = BGL.assets.get(\"KT-forest/texture/starfield1\")\n floor.bg_texture = BGL.assets.get(\"KT-forest/texture/nebula\")\n floor.parallax_sky = 0.01\n floor.parallax_bg = 0.04\n floor.fog_level_base = 1.0\n\n floor.uses_vision = True\n floor.god_shader = BGL.assets.get(\"KT-compositor/shader/oort_god\")\n return floor\n\n def build_area_kiln(self):\n floor = self.build_area_ship_type(\"kiln\")\n floor.title = \"The Kiln\"\n floor.uses_vision = False\n floor.custom_background = CloudBackground()\n\n floor.music = None\n floor.sky_texture = BGL.assets.get(\"KT-forest/texture/starfield1\")\n floor.bg_texture = BGL.assets.get(\"KT-forest/texture/nebula\")\n floor.parallax_sky = 0.01\n floor.parallax_bg = 0.04\n floor.fog_level_base = 1.0\n floor.override_base_zoom = 0.2\n floor.fuzz_amt = 1.4\n floor.uses_vision = True\n return floor\n\n def build_area_grey_world(self):\n area_raw = BGL.assets.get(\"KT-forest/textfile/grey_world\")\n area_def = get_area_data( area_raw )\n\n floor = DungeonFloor( \n\n bg_texture = BGL.assets.get(\"KT-forest/texture/grey_world_processed\"),\n sky_texture = BGL.assets.get(\"KT-forest/texture/grey_world_background\"),\n parallax_sky = -0.2,\n parallax_bg = 0.04,\n title = \"Background Radiation...\",\n god_shader = BGL.assets.get(\"KT-compositor/shader/radiation_god\"),\n fog_level_base=0.9, tilescale =2, width = area_def[\"width\"]*2, height = area_def[\"height\"]*2, camera = self.camera, player = self.player, objects = [], area_def = area_def )\n floor.game = self\n return floor\n\n\n ###############\n\n def load_floor( self, key, sequence = False ):\n\n self.background = Background()\n self.fog = Fog()\n self.area_name = key\n\n if sequence:\n return Sequences.build_sequence(self, sequence) \n \n floor = None\n if key == \"area_test\":\n floor = self.build_area_test()\n if key == \"docks\":\n floor = self.build_area_docks()\n if key == \"tower\":\n floor = self.build_area_tower()\n if key == \"arena\":\n floor = self.build_area_arena()\n if key == \"doortest\":\n floor = self.build_area_doortest()\n if key == \"oort_cloud\":\n floor = self.build_area_oort_cloud()\n if key == \"ship\":\n floor = self.build_area_ship()\n if key == \"lacuna_canal\":\n floor = self.build_area_lacuna_canal()\n if key == \"chase\":\n floor = self.build_area_chase()\n if key == \"platform\":\n floor = self.build_area_platform()\n if key == \"kiln\":\n floor = self.build_area_kiln()\n if key == \"grey_world\":\n floor = self.build_area_grey_world()\n if key == \"crystals1\":\n floor = self.build_area_crystals1()\n\n if(floor.god_shader):\n Game.god_shader = floor.god_shader\n\n return floor\n\n def next_area( self, area_name, target_switch = None, reset = False ):\n\n self.current_floor_key = area_name\n self.current_floor_target = target_switch\n if area_name == \"self\":\n area_name = self.area_name\n if (self.area_name is not area_name) or reset:\n #self.tickables.remove( self.floor )\n self.floor.destroy()\n result = gc.collect()\n\n self.floor = self.create_tickable( 
self.load_floor(area_name) )\n self.player.trigger_title( self.floor.title )\n self.floor.compositor_shader = BGL.assets.get(\"KT-compositor/shader/compositor\")\n self.player.add_dm_message(\"You teleported\")\n \n if \"bg_texture\" in self.floor.__dict__:\n Background.bg_texture = self.floor.bg_texture\n Background.sky_texture = self.floor.sky_texture\n Background.parallax_sky = self.floor.parallax_sky\n Background.parallax_bg = self.floor.parallax_bg\n if \"bg_mode\" in self.floor.__dict__:\n Background.add_blending = True\n\n\n #self.player.set_hud_message( \"{0} - {1}\".format(area_name, target_switch))\n\n for switch in self.floor.area_switches:\n if switch.switch_name == target_switch:\n self.player.p[0] = switch.p[0]\n self.player.p[1] = switch.p[1]\n switch.trigger_active = False\n\n self.camera.set_player(self.player)\n\n self.player.active_terminal = None\n if(self.floor.god_shader):\n Game.god_shader = self.floor.god_shader\n \n\n\n def create_player(self):\n return KPlayer( game = self, sight_radius = 90.0, speed = 7.00, controllers = self.controllers, texture = BGL.assets.get(\"KT-player/texture/player\"), size = [ 2.0,2.0] ) \n\n\n def trigger_fade(self, length, color = [ 0.0,0.0,0.0 ]):\n self.max_fade_amt = length\n self.fade_color = color\n self.fade_amt = 0.0\n\n\n def next_sequence(self, advance = True ):\n self.genocide_trigger_available = True\n self.player.sequence_kills = 0\n if(advance):\n self.player.time_penalty = floor(self.player.time_penalty * 1.23)\n self.player.pump_timer(\"completion\")\n self.floor.destroy()\n self.floor = self.create_tickable( self.load_floor( None, Sequences.next(advance) ) )\n self.player.trigger_title( self.floor.title )\n self.floor.compositor_shader = BGL.assets.get(\"KT-compositor/shader/compositor\")\n self.camera.set_player(self.player)\n self.player.active_terminal = None\n if(self.floor.god_shader):\n Game.god_shader = self.floor.god_shader\n\n def initialize(self):\n\n self.summary_page = None\n self.over2s = 0\n Menu.Game = Game\n self.rg = 0.0\n self.rb = 0.0\n self.rs = 0.0\n self.rt = 0.0\n self.genocide_trigger_available = True\n Sequences.initialize()\n\n self.active_cinematic = None\n self.fade_amt = 1.1\n self.max_fade_amt = 1.0\n self.fade_color = [0.0,0.0,0.0]\n\n self.abilities = Abilities\n self.doing_random_test = False\n self.prebuffer = 0\n self.camera = self.create_tickable( DungeonCamera( p = [0.0,0.0], zoom = 0.28 ) )\n self.controllers = self.create_tickable( Controllers() )\n Menu.controllers = self.controllers\n\n self.player = self.create_tickable( self.create_player() )\n BGL.keyboard.register_keydown_handler(\"1\", lambda: self.player.gun.cycle_1())\n BGL.keyboard.register_keydown_handler(\"2\", lambda: self.player.gun.cycle_2())\n #self.player = ( self.create_player() )\n\n ### ENTRY POINT\n###########################\n loading_floor = \"ship\"\n \n #self.floor = self.create_tickable(self.load_floor(loading_floor))\n self.floor = self.create_tickable(self.load_floor(None,Sequences.start_level))\n self.floor.music = Sequences.titles['1']['music']\n #self.current_floor_key = loading_floor\n self.current_floor_target = None\n self.player.trigger_title( self.floor.title )\n if \"bg_texture\" in self.floor.__dict__:\n Background.bg_texture = self.floor.bg_texture\n Background.sky_texture = self.floor.sky_texture\n Background.parallax_sky = self.floor.parallax_sky\n Background.parallax_bg = self.floor.parallax_bg\n if \"bg_mode\" in self.floor.__dict__:\n Background.add_blending = True\n\n 
self.floor.compositor_shader = BGL.assets.get(\"KT-compositor/shader/compositor\")\n self.camera.set_player(self.player)\n self.player_dead_frames = 0\n\n #self.trigger_cinematic(\"intro\")\n\n def render(self):\n\n if(self.prebuffer < PREBUFFER_R ):\n return\n if self.active_cinematic:\n self.active_cinematic.render()\n return\n elif( Game.main_menu):\n Menu.render()\n return\n else:\n with BGL.context.render_target( Game.god_buffer):\n if not self.floor.custom_background:\n ParallaxBackground.render(self.player.p[0]*0.01,self.floor.parallax_skin, self.floor._tick)\n else:\n BGL.context.clear( 0.0,0.0,0.0,0.0);\n self.floor.custom_background.camera = self.camera\n self.floor.custom_background.render(self.floor)\n #else:\n # self.background.camera = self.camera\n # self.background.render( self.floor.vision_lightmap.get_lightmap_texture()) \n self.floor.render()\n self.fog.camera = self.camera\n #self.fog.render(self.floor, self.floor.vision_lightmap.get_lightmap_texture(),self.floor.fog_level_real+self.floor.fog_level_base) \n\n\n self.floor.god_shader.bind({\n \"g\" : self.rg,\n \"b\" : self.rb,\n \"s\" : self.rs,\n \"t\" : self.rt,\n \"tick\" : self.floor._tick\n })\n Game.god_buffer.render_processed( self.floor.god_shader )\n self.player.render_hud()\n if(self.fade_amt< self.max_fade_amt):\n fade_perc = self.fade_amt / self.max_fade_amt\n\n with BGL.blendmode.alpha_over:\n uniform_fade.apply_fadeout( fade_perc, self.fade_color )\n if self.summary_page:\n self.summary_page.render()\n\n\n def tick(self):\n self.prebuffer += 1\n if(self.prebuffer < PREBUFFER):\n return\n\n if(self.summary_page):\n self.summary_page = self.summary_page.tick()\n\n\n if(self.active_cinematic):\n cinematic_running = self.active_cinematic.tick()\n if not cinematic_running:\n self.active_cinematic = None\n if self.active_cinematic:\n return\n\n if self.main_menu:\n return Menu.tick()\n else:\n if self.floor.camera.cinema_target:\n self.over2s = (self.over2s+1)%3\n if self.over2s == 0:\n return\n\n if(self.fade_amt< self.max_fade_amt):\n self.fade_amt += 1.0\n\n\n else:\n if self.doing_random_test:\n if(uniform(0.0,1.0)<0.003):\n area = choice ( [ \"ship\",\"grey_world\",\"doortest\"] )\n self.next_area( area, None )\n if(uniform(0.0,1.0)<0.01):\n self.player.p[0] = uniform( -1*self.floor.width, self.floor.width)\n self.player.p[1] = uniform( -1*self.floor.height, self.floor.height)\n\n if(self.floor.freeze_delay < 0):\n if(self.floor.freeze_frames > 0):\n self.floor.freeze_frames = self.floor.freeze_frames - 1\n return\n else:\n self.floor.freeze_delay = self.floor.freeze_delay - 1\n\n KTState.pad = self.player.get_pad()\n KTState.start_pressed[1] = KTState.start_pressed[0]\n KTState.start_pressed[0] = KTState.pad.button_down(BGL.gamepads.buttons.START)\n \n if(KTState.start_pressed[0]==False) and (KTState.start_pressed[1]==True):\n #KTState.paused = not KTState.paused\n Game.main_menu = not Game.main_menu\n \n BaseGame.tick(self)\n\n if(self.floor.custom_background):\n self.floor.custom_background.tick()\n\n if self.player.hp <= 0:\n self.player_dead_frames += 1\n if self.player_dead_frames > 220:\n self.player.hp = 100\n if \"current_floor_key\" in dir(self) and self.current_floor_key:\n self.next_area( self.current_floor_key, self.current_floor_target, True )\n else:\n self.player.pump_timer(\"death\")\n self.next_sequence(False)\n self.player_dead_frames = 0\n\n\n \n if self.floor.genocide_enabled and self.floor.playing_genocide() and self.genocide_trigger_available:\n passed_genocide = True\n merged 
=self.floor.snap_enemies + self.floor.suspended_enemies\n for enemy in merged:\n if enemy.snap_type == 1:\n if not enemy.skips_genocide():\n passed_genocide = False\n break\n\n if passed_genocide:\n self.floor.passed_genocide = True\n self.genocide_trigger_available = False\n dfloor = self.floor\n def ns():\n dfloor.game.next_sequence()\n def ms():\n ai = AttackInfo( p=[ self.camera.p[0]+uniform(-15.0,15.0), self.camera.p[1]+uniform(-15.0,15.0) ], message=\"~!purified!~\")\n self.floor.sounds.play(self.floor.sounds.sequenced)\n dfloor.create_object(ai)\n \n for x in range(0,7):\n self.floor.add_timeout( [ ms, 5+(x*x) ] )\n self.floor.add_timeout( [ ns, SummaryPage.Time - 10 ] )\n self.floor.game.trigger_fade( SummaryPage.Time, [ 0.0,0.0,0.0] )\n self.summary_page = SummaryPage( self.floor )\n self.floor.player.beat_level = True\n\n\n s = 0.0\n if(self.floor.player.shield_frames>0):\n s = 1.0\n\n t = 0.0\n if(self.player.title_card.displaying()):\n t = 1.0\n if(self.summary_page):\n t = 1.0\n\n if(self.floor.camera.cinema_target):\n t = 1.0\n\n g = 0.0\n if(self.floor.playing_genocide()):\n if(self.genocide_trigger_available):\n #g = 1.0\n g = 0.2\n self.rg = g*0.3 + (self.rg*0.7)\n self.rs = s*0.3 + (self.rs*0.7)\n self.rt = t*0.1 + (self.rt*0.9)\n\n self.rb = self.rb * 0.94\n if(self.rb<0.01):\n self.rb = 0.0\n\n\n #if self.player.sequence_kills >= 4:\n # self.next_sequence(True)\n\n\n","repo_name":"dzz/kthuune","sub_path":"src/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":23729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4106452931","text":"import numpy as np\nfrom pickle import dump\nfrom PIL import Image\n\n\ndef obtain_one_picture(image):\n \"\"\" obtain one picture vector from picture\n\n :param image: read image from img file\n :return: one picture vector\n \"\"\"\n if isinstance(image, str):\n try:\n image = Image.open(image)\n except Exception as e:\n print(e)\n mat = np.array(image)\n # remove background\n mat[mat >= 200] = 0\n mat[mat > 0] = 1\n return mat.ravel()\n\n\ndef create_train_data(path):\n \"\"\" create train data for model\n\n :return: None\n \"\"\"\n x = []\n y = []\n m_dict = {}\n num_set = set(range(1, 5))\n w = 0\n for i in num_set:\n for j in (num_set - {i}):\n for k in (num_set - {i, j}):\n for p in (num_set - {i, j, k}):\n num_str = str(i) + str(j) + str(k) + str(p)\n try:\n l = obtain_one_picture(path + \"/\" + num_str + \".jpg\").tolist()\n if l[0] is not None:\n x.append(l)\n y.append(w)\n m_dict[w] = num_str\n w += 1\n except Exception as e:\n print(e)\n\n # create array for x data and y data\n x_data = np.array(x)\n y_data = np.array(y)\n # save x_data and y_data into the npy file\n np.save(\"train_data/x_data.npy\", x_data)\n np.save(\"train_data/y_data.npy\", y_data)\n # # save m_dict\n with open(\"img/m_dict.pkl\", \"wb\") as f:\n dump(m_dict, f)\n\nif __name__ == '__main__':\n img_path = \"img\"\n # create train data and save image map\n create_train_data(img_path)\n","repo_name":"longxiaoyun/sina_weibo_login","sub_path":"process_picture.py","file_name":"process_picture.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"26502779613","text":"# Create your views here.\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom edrink.forms import EditProfileForm\n\n\ndef edit_profile(request):\n user = 
request.user\n form = EditProfileForm(request.POST or None,\n initial={'pk': user.pk, 'username': user.username, 'avatar': user.avatar})\n if request.method == 'POST':\n if form.is_valid():\n user.avatar = request.FILES['avatar']\n user.save()\n return HttpResponseRedirect('%s' % (reverse('admin:index')))\n\n context = {\n \"form\": form\n }\n\n return render(request, \"admin/edit_profile.html\", context)\n","repo_name":"mihalispap/EDrink","sub_path":"edrink/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8754525387","text":"import logging\nimport threading\nimport time\nimport random\n\nLOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'\nlogging.basicConfig(level=logging.INFO, format=LOG_FORMAT)\n\n\nsemaphore = threading.Semaphore(0)\nitem = 0\n\n\ndef supplier():\n logging.info('Supplier adds stock')\n semaphore.acquire()\n logging.info('Supplier notify: number of items {}'.format(item))\n\n\ndef customer():\n global item\n time.sleep(3)\n item = random.randint(0, 1000)\n logging.info('Customer buy items.')\n semaphore.release()\n\n\ndef main():\n for i in range(10):\n t1 = threading.Thread(target=supplier)\n t2 = threading.Thread(target=customer)\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"kerjabhakti/SISTER_3B","sub_path":"Chapter002/1204049_Zian Asti Dwiyanti/Semaphore.py","file_name":"Semaphore.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"7756363232","text":"from datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom flask import Blueprint, jsonify, redirect, render_template, request, url_for\nfrom flask_login import login_required, login_user, logout_user\nfrom werkzeug.security import check_password_hash\nfrom app import db \nfrom app.common.sql_utils import SqlUtils\nfrom app.modelos.folios import Folios\nimport xmltodict \n\n\napi_folios = Blueprint('api_folios', __name__, url_prefix='/api/mantenedores/folios')\n\n@api_folios.route(\"/agregar\", methods=[\"PUT\"])\n@login_required\ndef agregar_folios():\n archivo = request.files['file']\n contenido = archivo.read()\n contenido = xmltodict.parse(contenido)\n fecha_asignacion = contenido [\"AUTORIZACION\"][\"CAF\"][\"DA\"][\"FA\"]\n rango_desde = contenido [\"AUTORIZACION\"][\"CAF\"][\"DA\"][\"RNG\"][\"D\"]\n rango_hasta = contenido [\"AUTORIZACION\"][\"CAF\"][\"DA\"][\"RNG\"][\"H\"]\n rsapk = contenido [\"AUTORIZACION\"][\"CAF\"][\"DA\"][\"RSAPK\"][\"M\"]\n frma = contenido [\"AUTORIZACION\"][\"CAF\"][\"FRMA\"][\"#text\"]\n rsask = contenido [\"AUTORIZACION\"][\"RSASK\"]\n rsapubk = contenido [\"AUTORIZACION\"][\"RSAPUBK\"]\n\n folios = Folios()\n folios.fecha_asignacion = fecha_asignacion\n folios.rango_desde = rango_desde\n folios.rango_hasta = rango_hasta\n folios.rsapk = rsapk\n folios.frma = frma\n folios.rsask = rsask\n folios.rsapubk = rsapubk\n\n db.session.add(folios)\n db.session.commit()\n\n return jsonify({\"status\":'ok'}), 200\n\n@api_folios.route(\"/listar\", methods=[\"GET\"])\n@login_required\ndef listar_folios():\n pagina_lenght = int(request.args.get(\"length\"))\n start = int(request.args.get(\"start\"))\n pagina_index = int(start / pagina_lenght + 1)\n draw = int(request.args.get(\"draw\"))\n \n query = db.session.query(Folios.id, Folios.fecha_asignacion, 
Folios.rango_desde, Folios.rango_hasta, Folios.ultimo_utilizado).paginate(page=pagina_index, per_page=pagina_lenght, error_out=False)\n rows = query.items\n data = SqlUtils.rows_to_dict(rows)\n for fila in data:\n fila[\"fecha_vencimiento\"] = fila[\"fecha_asignacion\"] + relativedelta(months = 6)\n \n return jsonify({\"data\": data, \"recordsTotal\": query.total, \"draw\": draw, \"recordsFiltered\": query.total})\n\n@api_folios.route(\"/eliminar\", methods=[\"DELETE\"])\n@login_required\ndef eliminar_folios():\n valores = request.get_json()\n id = valores[\"id\"]\n folios = db.session.query(Folios).filter(Folios.id==id).first()\n db.session.delete(folios)\n db.session.commit()\n return jsonify({\"status\":'ok'}), 200\n","repo_name":"Daravena22/ProyectoDeTitulo","sub_path":"app/api/mantenedores/folios/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"851735785","text":"import pandas as pd\nfrom pandas_profiling import ProfileReport\n\n# Paths\ntry:\n path = 'C://Users//krath//PycharmProjects//input_output_files//'\n filename = 'housing_input.csv'\nexcept Exception as ex:\n print(\"Error while fetching the file\")\n exit()\n\ninput_file = path + filename\ndata_frame = pd.read_csv(input_file)\nprint(data_frame)\n\n# Generate HTML report\nprofile_ = ProfileReport(data_frame)\nprofile_.to_file(output_file=path+'housing_output.html')\n","repo_name":"Kashyap-Rathore/Data_Analysis_Using_Pandas_Profiling","sub_path":"data_analysis_using_pandas_profiling.py","file_name":"data_analysis_using_pandas_profiling.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17461872536","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport time\n\nEmail = 'test@tset.com'\npassword = '123456'\n\n\nclass CrackGeetest():\n def __init__(self):\n self.url = 'https://account.geetest.com/login'\n self.browser = webdriver.Chrome()\n self.wait = WebDriverWait(self.browser, 20)\n self.email = Email\n self.password = password\n\n def get_geetest_button(self):\n \"\"\"\n 获取geetest的按钮\n :return: 按钮对象\n \"\"\"\n button = self.wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'geetest_radar_tip')))\n return button\n\n def get_position(self):\n \"\"\"\n 获取验证码的位置\n :return: 验证码位置元祖\n \"\"\"\n img = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'geetest_canvas_img')))\n time.sleep(2)\n location = img.location\n size = img.size\n top, bottom, left, right = location['y'], location['y'] + size['height'], location['x'],\\\n location['x'] + size['width']\n return top, bottom, left, right\n\n def get_geetest_image(self, name='captcha.png'):\n \"\"\"\n 获取验证码图片\n :param name:\n :return:图片对象\n \"\"\"\n top, bottom, left, right = self.get_position()\n print('验证码位置', top, bottom, left, right)\n screenshot = self.get_screenshot()\n captcha = screenshot.crop((left, top, right, bottom))\n return captcha\n\n def get_screenshot(self):\n pass\n\n def get_slider(self):\n \"\"\"\n 获取滑块\n :return:\n \"\"\"\n slider = self.wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'geetest_slider_button')))\n return slider\n\n # 点按呼出缺口\n slider = get_slider()\n slider.click()\n\n def 
\n    def move_to_gap(self, slider, tracks):\n        \"\"\"\n        Drag the slider along the track to the gap.\n        :param slider: slider element\n        :param tracks: movement track\n        :return:\n        \"\"\"\n        ActionChains(self.browser).click_and_hold(slider).perform()\n        for x in tracks:\n            ActionChains(self.browser).move_by_offset(xoffset=x, yoffset=0).perform()\n        time.sleep(0.5)\n        ActionChains(self.browser).release().perform()","repo_name":"mashuyang1/learning_msy","sub_path":"16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5271273983","text":"import numpy as np\n\nBOHR2ANG = 0.52917721067\nANG2BOHR = 1.889726125457828\n\n\ndef parse_xyz_str(xyz_str, ang2bohr=False):\n    \"\"\"Parse a xyz string.\n\n    Parameters\n    ----------\n    xyz_str : str\n        The contents of a .xyz file.\n    ang2bohr : bool\n        Convert the coordinates from Angstrom to Bohr if True.\n\n    Returns\n    -------\n    atoms : list\n        List of length N (N = number of atoms) holding the\n        element symbols.\n    coords: np.array\n        An array of shape (N, 3) holding the xyz coordinates.\n    \"\"\"\n\n    xyz_lines = xyz_str.strip().split(\"\\n\")\n    atom_num = int(xyz_lines[0].strip())\n    comment_line = xyz_lines[1]\n\n    # Only consider the first four items on a line\n    atoms_coords = [line.strip().split()[:4]\n                    for line in xyz_str.strip().split(\"\\n\")[2:]\n                   ]\n    atoms, coords = zip(*[(a, c) for a, *c in atoms_coords])\n    coords = np.array(coords, dtype=float)\n    if ang2bohr:\n        coords *= ANG2BOHR\n    return atoms, coords\n\n\ndef parse_xyz_file(xyz_fn, ang2bohr=False):\n    with open(xyz_fn) as handle:\n        xyz_str = handle.read()\n\n    return parse_xyz_str(xyz_str, ang2bohr)\n
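\n\n# Usage sketch (hypothetical file name):\n#   atoms, coords = parse_xyz_file(\"water.xyz\", ang2bohr=True)\n#   assert coords.shape == (len(atoms), 3)\n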
","repo_name":"eljost/batchprep","sub_path":"batchprep/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"17484196925","text":"import os\nfrom flask import Flask, request, jsonify\nimport urllib.request\nimport urllib.error\nimport io\nfrom PIL import Image\nimport imagehash\n\napp = Flask(__name__)\nport = int(os.environ.get('PORT', 8022))\n\n@app.route('/images-hash-filter', methods=['POST'])\ndef hash_images():\n    post_data = request.get_json()\n    urls = post_data['urls']\n\n    images = []\n    hashes = []\n\n    for url in urls:\n        if not url:\n            continue\n\n        try:\n            image = Image.open(io.BytesIO(urllib.request.urlopen(url).read()))\n        except (OSError, urllib.error.URLError):\n            # Skip the current URL if it's not a valid image file\n            continue\n\n        hash = imagehash.average_hash(image)\n\n        is_duplicate = False\n        for h in hashes:\n            # \"-\" on ImageHash objects yields the Hamming distance between hashes\n            if hash - h < 15: # adjust threshold as needed\n                is_duplicate = True\n                break\n\n        if not is_duplicate:\n            images.append(url)\n            hashes.append(hash)\n\n    return jsonify({'result': images})\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=port)","repo_name":"Waiviogit/waivio_image_duplicate_checker","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"40840439267","text":"import logging\n\nimport openerp.addons.decimal_precision as dp\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import ValidationError\n\n_logger = logging.getLogger(__name__)\n\nSTATE_SELECTION = [\n    ('draft', 'Draft'),\n    ('pending', 'Approval Pending'),\n    ('approved', 'Approved'),\n    ('denied', 'Denied'),\n    ('po_created', 'PO Created'),\n]\n\nREADONLY_STATES = {\n    'pending': [('readonly', True)],\n    'approved': [('readonly', True)],\n    'denied': [('readonly', True)],\n    'po_created': [('readonly', True)],\n}\n\n\nclass PurchaseRequest(models.Model):\n    _name = 'purchase.request'\n    _order = 'date_request desc, id desc'\n    _inherit = ['mail.thread', 'ir.needaction_mixin']\n    _track = {\n        'state': {\n            'purchase_request.mt_request_sent':\n                lambda self, cr, uid, obj, ctx=None: obj.state in ['pending']\n        },\n    }\n\n    @api.depends('purchase_line.price_total')\n    def _amount_all(self):\n        for request in self:\n            amount_untaxed = amount_tax = 0.0\n            for line in request.purchase_line:\n                amount_untaxed += line.price_subtotal\n                amount_tax += line.price_tax\n            request.update({\n                'amount_untaxed': request.currency_id.round(amount_untaxed),\n                'amount_tax': request.currency_id.round(amount_tax),\n                'amount_total': amount_untaxed + amount_tax,\n            })\n\n    @api.depends('employee_id')\n    def _is_employee(self):\n        for e in self:\n            e.is_employee = e.employee_id == self.env.user\n\n    @api.depends('validator_id')\n    def _is_validator(self):\n        for v in self:\n            v.is_validator = v.validator_id == self.env.user\n\n    name = fields.Char(\n        string=\"Purchase Request\",\n        required=True, select=True, copy=False,\n        default=lambda a: '/', states=READONLY_STATES,\n        help=\"Unique number of the purchase request, \\\n            computed automatically when the purchase request is created.\")\n    partner_id = fields.Many2one(\n        'res.partner',\n        string=\"Supplier Reference\", copy=True,\n        help=\"Supplier\", states=READONLY_STATES)\n    description = fields.Char(\n        string=\"Purchase Description\", states=READONLY_STATES)\n    # pass the callable so the default is evaluated per record, not once at import time\n    date_request = fields.Datetime(string=\"Request Date\", required=True,\n                                   copy=True, default=fields.Datetime.now,\n                                   states=READONLY_STATES)\n    currency_id = fields.Many2one(\n        'res.currency', string=\"Currency\",\n        required=True, states=READONLY_STATES,\n        default=lambda s: s.env.user.company_id.currency_id.id)\n    state = fields.Selection(selection=STATE_SELECTION, string=\"Status\",\n                             readonly=True, help=\"Status\",\n                             select=True, copy=False, default=\"draft\")\n    purchase_line = fields.One2many(\n        'purchase.request.line',\n        'purchase_request_id',\n        'Request Lines',\n        states=READONLY_STATES,\n        copy=True)\n    employee_id = fields.Many2one('res.users',\n                                  string=\"Requested By\",\n                                  required=True, copy=True,\n                                  default=lambda s: s.env.user,\n                                  states=READONLY_STATES)\n    is_employee = fields.Boolean(string=\"Is Employee Responsible\",\n                                 compute='_is_employee')\n    validator_id = fields.Many2one(\n        'res.users', string=\"Validated by\", copy=False)\n    is_validator = fields.Boolean(string=\"Is Validator Responsible\",\n                                  compute='_is_validator')\n    notes = fields.Text('Terms and Conditions')\n    request_type = fields.Many2one('purchase.request.type',\n                                   string=\"Purchase Request Type\",\n                                   required=True, states=READONLY_STATES)\n    purchase_order_id = 
fields.Many2one('purchase.order',\n string=\"Related PO\", readonly=True)\n amount_untaxed = fields.Float(compute='_amount_all',\n digits_compute=dp.get_precision('Account'),\n string=\"Untaxed Amount\")\n amount_tax = fields.Float(compute='_amount_all',\n digits_compute=dp.get_precision('Account'),\n string=\"Taxes\")\n amount_total = fields.Float(compute='_amount_all',\n digits_compute=dp.get_precision('Account'),\n string=\"Total\")\n fiscal_position_id = fields.Many2one(\n 'account.fiscal.position', string='Fiscal Position',\n oldname='fiscal_position')\n company_id = fields.Many2one(\n 'res.company', string=\"Company\", required=True, states=READONLY_STATES,\n default=lambda s: s.env.user.company_id)\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n if not self.partner_id:\n self.fiscal_position_id = False\n self.currency_id = False\n else:\n afpobj = self.env['account.fiscal.position']\n self.fiscal_position_id = afpobj.get_fiscal_position(\n self.partner_id.id)\n ppc = self.partner_id.property_purchase_currency_id.id\n self.currency_id = ppc or self.env.user.company_id.currency_id.id\n return {}\n\n @api.model\n def create(self, vals):\n if vals.get('name', '/') == '/':\n ir = self.env['ir.sequence']\n vals['name'] = ir.next_by_code('purchase.request') or '/'\n purchase = super(PurchaseRequest, self).create(vals)\n return purchase\n\n @api.multi\n def unlink(self):\n unlink_ids = self.env['purchase.request']\n for s in self:\n if s.state in ['draft']:\n unlink_ids |= s\n else:\n raise ValidationError(\n _(\"In order to delete a purchase request, \\\n it must be in Draft state.\"))\n\n return super(PurchaseRequest, unlink_ids).unlink()\n\n @api.multi\n def button_send(self):\n # Send Email\n '''\n This function opens a window to compose an email,\n with the purchase request template message loaded by default\n '''\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference(\n 'purchase_request',\n 'purchase_request_template')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference(\n 'mail',\n 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n ctx = dict()\n ctx.update({\n 'default_model': 'purchase.request',\n 'default_res_id': self.id,\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'mark_so_as_sent': True\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n @api.model\n def _get_po_vals(self):\n # po_obj = self.env['purchase.order']\n po_vals = {\n 'origin': self.name,\n 'partner_ref': self.description,\n 'date_order': self.date_request,\n 'partner_id': self.partner_id.id,\n 'dest_address_id': self.partner_id.id,\n 'currency_id': self.currency_id.id,\n # 'validator': self.validator_id.id,\n 'notes': self.notes,\n # 'fiscal_position': self.fiscal_position_id.id or False,\n }\n return po_vals\n\n @api.model\n def _get_po_line_vals(self, po_id):\n lines = []\n for line in self.purchase_line:\n lines.append({\n 'order_id': po_id,\n 'product_id': line.product_id.id,\n 'product_uom': line.product_uom.id,\n 'name': line.description or '',\n 'product_qty': line.product_qty,\n 'price_unit': line.price_unit,\n 'taxes_id': [(6, 0, [t.id for t in 
line.taxes_id])],\n 'date_planned': fields.Date.today(),\n })\n return lines\n\n @api.multi\n def button_create_po(self):\n # Create PO\n self.ensure_one()\n pol_obj = self.env['purchase.order.line']\n po_vals = self._get_po_vals()\n po = self.env['purchase.order'].create(po_vals)\n po.write({'purchase_request_id': self.id})\n po_line_vals = self._get_po_line_vals(po.id)\n for line_val in po_line_vals:\n pol_obj.create(line_val)\n self.signal_workflow('create_po')\n self.write({\n 'purchase_order_id': po.id,\n 'state': 'po_created',\n })\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'purchase.order',\n 'views': [[False, 'form']],\n 'res_id': po.id,\n }\n\n\nclass PurchaseRequestType(models.Model):\n _name = 'purchase.request.type'\n _order = 'name'\n\n name = fields.Char(string=\"Request Type Name\", required=True)\n\n\nclass PurchaseRequestLine(models.Model):\n _name = 'purchase.request.line'\n\n @api.depends('product_qty', 'price_unit', 'taxes_id')\n def _compute_amount(self):\n for line in self:\n taxes = line.taxes_id.compute_all(\n line.price_unit, line.purchase_request_id.currency_id,\n line.product_qty, product=line.product_id,\n partner=line.purchase_request_id.partner_id)\n line.update({\n 'price_tax': taxes['total_included'] - taxes['total_excluded'],\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n\n purchase_request_id = fields.Many2one('purchase.request')\n product_id = fields.Many2one(\n 'product.product', string=\"Product\",\n domain=[('purchase_ok', '=', True)],\n change_default=True, required=True)\n taxes_id = fields.Many2many('account.tax', string='Taxes')\n product_uom = fields.Many2one(\n 'product.uom', string='Product Unit of Measure', required=True)\n description = fields.Char(string=\"Description\")\n product_qty = fields.Float(\n string='Quantity',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n required=True, default=1.0)\n price_unit = fields.Float(\n string='Unit Price',\n required=True, digits_compute=dp.get_precision('Product Price'))\n price_subtotal = fields.Monetary(\n compute='_compute_amount', string='Subtotal', store=True)\n price_total = fields.Monetary(\n compute='_compute_amount', string='Total', store=True)\n price_tax = fields.Monetary(\n compute='_compute_amount', string='Tax', store=True)\n partner_id = fields.Many2one(\n 'res.partner', related='purchase_request_id.partner_id',\n string='Partner', readonly=True, store=True)\n currency_id = fields.Many2one(\n related='purchase_request_id.currency_id', store=True,\n string='Currency', readonly=True)\n date_request = fields.Datetime(\n related='purchase_request_id.date_request',\n string='Purchase Request Date', readonly=True)\n\n @api.onchange('product_id', 'product_qty', 'product_uom')\n def onchange_product_id(self):\n result = {}\n if not self.product_id:\n return {}\n\n if self.product_id.uom_id.category_id.id != \\\n self.product_uom.category_id.id:\n self.product_uom = self.product_id.uom_po_id\n result['domain'] = {\n 'product_uom': [('category_id', '=',\n self.product_id.uom_id.category_id.id)]}\n\n prdate = self.purchase_request_id.date_request\n seller = self.product_id._select_seller(\n self.product_id,\n partner_id=self.partner_id,\n quantity=self.product_qty,\n date=prdate and prdate[:10],\n uom_id=self.product_uom)\n\n price_unit = seller.price if seller else 0.0\n if price_unit and seller and self.purchase_request_id.currency_id \\\n and seller.currency_id != self.purchase_request_id.currency_id:\n price_unit 
= seller.currency_id.compute(\n price_unit, self.purchase_request_id.currency_id)\n self.price_unit = price_unit\n\n product_lang = self.product_id.with_context({\n 'lang': self.partner_id.lang,\n 'partner_id': self.partner_id.id,\n })\n self.description = product_lang.display_name\n if product_lang.description_purchase:\n self.description += '\\n' + product_lang.description_purchase\n\n taxes = self.product_id.supplier_taxes_id\n fpos = self.purchase_request_id.fiscal_position_id\n if fpos:\n self.taxes_id = fpos.map_tax(taxes)\n\n result['value'] = {\n 'description': self.description,\n 'product_uom': self.product_uom.id,\n 'product_qty': self.product_qty,\n 'taxes_id': self.taxes_id.ids,\n 'price_unit': self.price_unit,\n }\n\n return result\n\n\nclass MailComposeMessage(models.Model):\n _inherit = 'mail.compose.message'\n\n @api.multi\n def send_mail(self):\n context = dict(self._context)\n if context.get('default_model') == 'purchase.request' \\\n and context.get('default_res_id') \\\n and context.get('mark_so_as_sent'):\n pr = self.env['purchase.request']\n pr.browse(context['default_res_id']).signal_workflow('send')\n return super(MailComposeMessage, self).send_mail()\n","repo_name":"minorisa/addons-enhanced","sub_path":"purchase_request/purchase_request.py","file_name":"purchase_request.py","file_ext":"py","file_size_in_byte":14137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2052874657","text":"# Script to automate my backups, you can adapt it for your needs\n# I created this for my personal needs, so i don't recommend using it in critical stuff.\n# I DON'T HAVE ANY RESPONSIBILITY FOR ANY DAMAGE OR FILE LOST CAUSED BY THIS SCRIPT\n\nimport sys\n#print sys.version\n#import platform\n#platform.system() == 'Windows'\n\nimport shutil\nimport os\nimport stat\nimport time\nimport win32file\nimport win32api\nimport win32con\nimport pywintypes\nimport json\nimport datetime\n\n# import hashlib\n\n# TODO: use a file to set the configurations\n\nPATHLOG = r''\nLOGFILE = PATHLOG + 'dir-backup.log'\n# LOGFILE = PATHLOG + 'dir-backup-debug.log'\n#encoding='utf-8': avoiding \"UnicodeEncodeError: 'charmap' codec can't encode character\"\n#for python2 use:\n#import io -> io.open()\nif __name__ == '__main__':\n\ttry:\n\t\tLOG = open(LOGFILE, 'a', encoding='utf-8')\n\texcept IOError:\n\t\tprint ('Error opening log file')\n\t\tsys.exit()\n\n\n#C:\nINTERNAL_1_VSERIAL = 99999999\n#D:\nINTERNAL_2_VSERIAL = 0000000000\n\nEXTERNAL_SEAGATE_1_VSERIAL = 77777777\nEXTERNAL_SAMSUNG_1_VSERIAL = 444444444444\n# create many as you need\n# .\n# .\n\n# labels used in LOG\nDEVICELABELS = {\n\tINTERNAL_1_VSERIAL : 'INTERNAL C:',\n\tINTERNAL_2_VSERIAL : 'INTERNAL D:',\n\tEXTERNAL_SEAGATE_1_VSERIAL : 'EXTERNAL SEAGATE 1TB',\n\tEXTERNAL_SAMSUNG_1_VSERIAL : 'EXTERNAL SAMSUNG 2TB'\n}\n\n# some files that i don't want to move\n# because they appear in different folders and are exactly the same\nFILES_EXCEPTIONS = [\n\t'INFO.TXT',\n\t'TEST.TXT', \n]\n\nFILES_ALWAYS_REPLACE = ['INFO.TXT']\n\n# params available:\n# enter_folders (bool) -> if False, it ignores folders, copy/sync only the files in the root of orig\n# try_to_move (bool) -> move files if possible, instead of copying\n# overwrite (bool) -> always copy and overwrite files to destination\n# some examples (change to your needs):\nOPERATIONS = [\n\t{\n\t\t'orig': r'\\Users\\myuser\\Music', \n\t\t'dest': r'\\BACKUP\\Music', \n\t\t'type': 'sync',\n\t\t'orig_serial': INTERNAL_1_VSERIAL,\n\t\t'dest_serial': 
EXTERNAL_SEAGATE_1_VSERIAL,\n\t},\n\n\t{\n\t\t'orig': r'\\Users\\myuser\\Downloads', \n\t\t'dest': r'\\BACKUP\\Downloads', \n\t\t'type': 'copy',\n\t\t'params': {\n\t\t\t'enter_folders': False\n\t\t},\n\t\t'orig_serial': INTERNAL_1_VSERIAL,\n\t\t'dest_serial': EXTERNAL_SEAGATE_1_VSERIAL\n\t},\n\n\t{\n\t\t'orig': r'\\some-folder-in-root', \n\t\t'dest': r'\\BACKUP2\\some-folder-in-root', \n\t\t'type': 'copy',\n\t\t'params': {\n\t\t\t'try_to_move': True\n\t\t},\n\t\t'orig_serial': INTERNAL_2_VSERIAL,\n\t\t'dest_serial': EXTERNAL_SAMSUNG_1_VSERIAL,\n\t},\n\n\t{\n\t\t'orig': r'\\Users\\myuser\\folder-full-of-txts',\n\t\t'dest': r'\\BACKUP2\\folder-full-of-txts',\n\t\t'type': 'copy',\n\t\t'params': {\n\t\t\t'overwrite': True\n\t\t},\n\t\t'orig_serial': INTERNAL_1_VSERIAL,\n\t\t'dest_serial': EXTERNAL_SAMSUNG_1_VSERIAL,\t\n\t},\n]\n\nERROR_MSG = ''\n\n#testing no disk space error\nTESTSIZE = 100000\nTESTSIZE_AFTER = 10\n\n# 16*1024 = 16KiB\n# 16*1024*1024 = 16MiB\n# 16*1024*1024*1024 = 16GiB\n# a large buffer is better for copying large files, and we can implement a progress bar in the future\n# i found this googling, but i lost the link\ndef modified_copyfileobj(fsrc, fdst, length = 24*1024*1024):\n\twhile 1:\n\t\tbuf = fsrc.read(length)\n\t\tif not buf:\n\t\t\tbreak\n\t\tfdst.write(buf)\n\t\t#time.sleep(0.1)\n\n# find the relative path for the destination file\n# maintain the same directory structure for the destination\n# path2 is the base directory and path1 is some file or dir below it\ndef getNewPath(dest, file, path1, path2):\n\trelative = os.path.relpath(path1, path2)\n\tif relative == '.':\n\t\trelative = ''\n\t\n\treturn os.path.join(dest, relative, file)\n\ndef isrecursive(params):\n\treturn False if 'enter_folders' in params and params['enter_folders'] == False else True\n\ndef overwrite(params):\n\treturn True if 'overwrite' in params and params['overwrite'] == True else False\n\ndef try_to_move(params):\n\treturn True if 'try_to_move' in params and params['try_to_move'] == True else False\n\ndef setctime(originalpath, newfilepath):\n\t# can be needed for CreateFileW\n\t# if os.path.isdir(newfilepath):\n\t# \tnewfilepath = '\\\\\\\\.\\\\' + newfilepath\n\n\t# win32con.FILE_ATTRIBUTE_NORMAL -> ACCESS DENIED ON DIRECTORIES\n\t# this fails sometimes, windows says another process is still using the file\n\t# TODO: loop with a number of tries\n\ttry:\n\t\t# translate to correct windows time format\n\t\tctime = pywintypes.Time(os.path.getctime(originalpath))\n\n\t\thandle = win32file.CreateFile(\n\t\t\tnewfilepath,\n\t\t\twin32con.GENERIC_WRITE,\n\t\t\twin32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,\n\t\t\tNone,\n\t\t\twin32con.OPEN_EXISTING,\n\t\t\twin32con.FILE_FLAG_BACKUP_SEMANTICS, #for both, files and directories\n\t\t\tNone\n\t\t)\n\t\twin32file.SetFileTime(handle, ctime, None, None)\n\t\thandle.close()\n\n\t\t# log('DEBUG: setctime - ORIGINALPATH: '+originalpath + ' - NEWFILEPATH: '+newfilepath)\n\texcept Exception as e:\n\t\tlog('\\t\\tERROR SETCTIME: ' + str(e))\n\t\t\n\n# used to log the path without the common prefix\ndef onlysubpath(initialroot, currentroot):\n\tcommonpath = os.path.commonpath([os.path.splitdrive(initialroot)[1], os.path.splitdrive(currentroot)[1]])\n\treturn os.path.splitdrive(currentroot)[1].replace(commonpath, '', 1)\n\ndef set_original_attrs(orig, dest):\n\t# usually this will avoid an exception\n\tos.chmod(dest, stat.S_IWRITE)\t\n\n\t# copy the HIDDEN attribute, if the source is HIDDEN\n\ttry:\n\t\t# hidden attr does not work for some 
reason\n\t\tshutil.copystat(orig, dest)\n\n\t\tif win32con.FILE_ATTRIBUTE_HIDDEN & win32file.GetFileAttributesW(orig):\n\t\t\twin32file.SetFileAttributes(dest, win32con.FILE_ATTRIBUTE_HIDDEN)\n\texcept Exception as e:\n\t\tlog('\\t\\tERROR SET_ORIGINAL_ATTRS: ' + str(e))\n\n\t# maintain the creation time intact\n\tsetctime(orig, dest)\n\ndef check_disk_space(originalpath, newfilepath):\n\t# the problem with this approach is that the file size on disk is different than the real size\n\t# user_free_bytes, total_bytes, total_free_bytes = win32api.GetDiskFreeSpaceEx(os.path.splitdrive(newfilepath)[0])\n\t# print(os.path.splitdrive(newfilepath)[0])\n\t# print('FREE SPACE: ', int(user_free_bytes/1024/1024/1024), 'GB')\n\t# print('FILESIZE:', os.path.getsize(originalpath))\n\t\n\ttry:\n\t\tdrive = os.path.splitdrive(newfilepath)[0]\n\t\tsectors_per_cluster, bytes_per_sector, number_free_clusters, total_number_clusters = win32api.GetDiskFreeSpace(drive)\n\t\tfree_space_in_bytes = number_free_clusters * bytes_per_sector * sectors_per_cluster\n\t\t# round up to whole clusters (overestimates by one cluster on exact multiples)\n\t\tclusters = int(os.path.getsize(originalpath) / bytes_per_sector / sectors_per_cluster) + 1\n\t\tfilesize_on_disk = clusters * sectors_per_cluster * bytes_per_sector\n\n\t\t# global TESTSIZE, TESTSIZE_AFTER\n\t\t# free_space_in_bytes = TESTSIZE\n\t\tif filesize_on_disk > free_space_in_bytes:\n\t\t\ton_error_log('NO DISK SPACE - ' + drive, 'No disk space')\n\t\t\treturn False\n\t\t# TESTSIZE = TESTSIZE_AFTER\n\n\t\treturn True\n\texcept Exception as e:\n\t\ton_error_log('ERROR CHECK_DISK_SPACE: ' + str(e), 'Error checking disk space')\n\t\treturn False\n\n\t# filesize = os.path.getsize(originalpath)\n\t# print('FREE SPACE: ', free_space_in_bytes)\n\t# print('FILESIZE:', filesize)\n\t# print('FILESIZE ON DISK:', filesize_on_disk)\n
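\n# Worked example (illustrative numbers): with 8 sectors/cluster and 512 B/sector,\n# a 5000 B file rounds up to 2 clusters, i.e. 8192 B on disk -- that rounded-up\n# figure, not the raw file size, is what gets compared against the free space.\n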
\ndef move_equals(orig, dest):\n\tcount_moved = 0\n\tif not os.path.exists(orig) or not os.path.exists(dest):\n\t\treturn count_moved\n\n\tglobal FILES_EXCEPTIONS\n\tfiles_orig = {}\n\tall_orig_dirs = []\n\t# get all files from origin\n\tfor root, dirs, files in os.walk(orig):\n\t\t# store all source dirs, for use later to set file attributes in destination\n\t\tif len(dirs) > 0:\n\t\t\tfor d in dirs:\n\t\t\t\tall_orig_dirs.append(os.path.join(root, d))\n\n\t\tfor f in files:\n\t\t\tfullpath = os.path.join(root, f)\n\t\t\t# i don't think this is necessary\n\t\t\t# sha1 = hashlib.sha1()\n\t\t\t# sha1.update((f + str(os.path.getsize(fullpath))).encode('utf-8'))\n\t\t\t# sha1.hexdigest()\n\t\t\tif f.upper() in FILES_EXCEPTIONS:\n\t\t\t\tcontinue\n\n\t\t\t# files are considered identical when name and size match (no content hash)\n\t\t\tkey = str(os.path.getsize(fullpath)) + f\n\t\t\tfiles_orig[key] = {'root': root, 'file': f}\n\n\t# equal = []\n\t# move the same files to the same source path\n\tfor root, dirs, files in os.walk(dest):\n\t\tfor f in files:\n\t\t\tfullpath = os.path.join(root, f)\n\t\t\tkey = (str(os.path.getsize(fullpath)) + f)\n\n\t\t\t# found equal files\n\t\t\tif key in files_orig:\n\t\t\t\trel_path = os.path.relpath(files_orig[key]['root'], orig)\n\t\t\t\t# '' keeps os.path.join() rooted at dest for files already at the top level\n\t\t\t\trel_path = rel_path if rel_path != '.' else ''\n\n\t\t\t\t# create missing dirs in the destination\n\t\t\t\tto_create = os.path.join(dest, rel_path)\n\t\t\t\tif not os.path.exists(to_create):\n\t\t\t\t\t# Python 3.2+\n\t\t\t\t\tos.makedirs(to_create, exist_ok=True)\n\n\t\t\t\t\t# Python 2.7+\n\t\t\t\t\t# try:\n\t\t\t\t\t#     os.makedirs(to_create)\n\t\t\t\t\t# except OSError:\n\t\t\t\t\t#     if not os.path.isdir(to_create):\n\t\t\t\t\t#         raise\n\n\t\t\t\tto = os.path.join(dest, rel_path, f)\n\t\t\t\t# different paths, so move instead of copy\n\t\t\t\tif to != fullpath:\n\t\t\t\t\tsubpath_from = onlysubpath(initialroot=dest, currentroot=fullpath)\n\t\t\t\t\tsubpath_to = onlysubpath(initialroot=dest, currentroot=to)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.move(fullpath, to)\n\t\t\t\t\t\t# log('DEBUG: move_equals - FROM: '+fullpath + ' TO: '+to)\n\t\t\t\t\t\tlog('\\t\\tMOVED - FROM: ' + subpath_from + ' TO: ' + subpath_to)\n\t\t\t\t\t\tcount_moved += 1\n\t\t\t\t\t\t# equal.append('FROM: ' + fullpath + ' - TO: '+ to)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t# log('\\t\\tERROR MOVE - FROM: ' + subpath_from + ' TO: ' + subpath_to)\n\t\t\t\t\t\tlog('\\t\\tERROR MOVE_EQUALS (SHUTIL.MOVE): ' + str(e))\n\t\t\t\t\t\n\t\t\t\t\tset_original_attrs(os.path.join(files_orig[key]['root'], files_orig[key]['file']), to)\n\n\t# try to maintain the same file attributes in destination\n\tfor d in all_orig_dirs:\n\t\tpath_in_dest = getNewPath(dest, '', d, orig)\n\t\tif os.path.exists(path_in_dest):\n\t\t\tset_original_attrs(d, path_in_dest)\n\t\t\n\t# print(json.dumps(files_orig, indent=4))\n\t# print(json.dumps(equal, indent=4))\n\treturn count_moved\n\ndef copydir(orig, dest, params = []):\n\tcount_copied = 0\n\tcount_moved = 0\n\tcreated_dirs = []\n\tglobal FILES_ALWAYS_REPLACE\n\n\tif not os.path.exists(orig):\n\t\treturn count_copied, count_moved\n\n\tif try_to_move(params):\n\t\tcount_moved = move_equals(orig, dest)\n\n\n\tif not os.path.exists(dest):\n\t\tos.mkdir(dest)\n\t\t#os.chmod(dest, 0o777)\n\t\tcreated_dirs.append({'from': orig, 'to': dest})\n\t\n\t# avoid calling the same functions inside the loop\n\tv_overwrite = overwrite(params)\n\tv_isrecursive = isrecursive(params)\n\n\t#shutil.copytree(orig, dest)\n\tfor root, dirs, files in os.walk(orig):\n\t\toverwrite_current = False\n\n\t\tif not v_isrecursive:\n\t\t\t# remove directories\n\t\t\twhile len(dirs) > 0:\n\t\t\t\tdirs.pop()\n\t\n\t\tfor d in dirs:\n\t\t\tnewdir = getNewPath(dest, d, root, orig)\n\t\t\tif not os.path.exists(newdir):\n\t\t\t\tos.mkdir(newdir)\n\t\t\t\tcreated_dirs.append({'from': os.path.join(root, d), 'to': newdir})\n\t\t\t\t\n\t\tfor f in files:\n\t\t\toriginalpath = os.path.join(root, f)\n\t\t\tnewfilepath = getNewPath(dest, f, root, orig)\n\t\t\toverwrite_current = False\n\n\t\t\t# same name but different sizes, therefore overwrite (only this file)\n\t\t\tif os.path.exists(newfilepath) and (os.path.getsize(newfilepath) != os.path.getsize(originalpath)):\n\t\t\t\toverwrite_current = True\n\n\t\t\tif f.upper() in FILES_ALWAYS_REPLACE:\n\t\t\t\toverwrite_current = True\n\n\t\t\tif not os.path.exists(newfilepath) or v_overwrite or overwrite_current:\n\t\t\t\tif not check_disk_space(originalpath, newfilepath):\n\t\t\t\t\treturn count_copied, count_moved\n\n\t\t\t\ttry:\n\t\t\t\t\tfsrc = open(originalpath, 'rb')\n\t\t\t\texcept IOError:\n\t\t\t\t\tmsg = 'ERROR COPYDIR: COULD NOT OPEN FILE: ' + originalpath\n\t\t\t\t\ton_error_log(msg, msg)\n\t\t\t\t\treturn count_copied, count_moved\n\t\t\t\ttry:\n\t\t\t\t\tfdst = 
open(newfilepath, 'wb')\n\t\t\t\texcept IOError:\n\t\t\t\t\tmsg = 'ERROR COPYDIR: COULD NOT OPEN FILE: ' + newfilepath\n\t\t\t\t\ton_error_log(msg, msg)\n\t\t\t\t\treturn count_copied, count_moved\n\n\t\t\t\tmodified_copyfileobj(fsrc, fdst)\n\t\t\t\tfsrc.close()\n\t\t\t\tfdst.close()\n\t\t\t\t# Windows does not release the files!!! Having problems with win32file.CreateFile afterwards\n\t\t\t\tfsrc = None\n\t\t\t\tfdst = None\n\t\t\t\t\n\t\t\t\tcount_copied += 1\n\t\t\t\tsubpath = onlysubpath(initialroot=orig, currentroot=root)\n\t\t\t\tlog('\\t\\tCOPIED: ' + subpath + '\\\\' + f)\n\t\t\t\tset_original_attrs(originalpath, newfilepath)\n\n\t\t\t\t# I was having some problems with win32file.CreateFile in the following: pywintypes.error: (32, 'CreateFile', 'The process cannot access the file because it is being used by another process.'),\n\t\t\t\t# so I changed it to close() explicitly\n\t\t\t\t'''\n\t\t\t\twith open(originalpath, 'rb') as fsrc:\n\t\t\t\t\twith open(newfilepath, 'wb') as fdst:\n\t\t\t\t\t\t#shutil.copyfileobj(fsrc, fdst)\n\t\t\t\t\t\tmodified_copyfileobj(fsrc, fdst)\n\t\t\t\tshutil.copystat(originalpath, newfilepath)\n\t\t\t\t'''\n\n\t# try to maintain the same file attributes in destination\n\tfor c_dir in created_dirs:\n\t\tset_original_attrs(c_dir['from'], c_dir['to'])\n\n\treturn count_copied, count_moved\n\ndef removediff(toremovepath, comparepath, params = []):\n\ttotal_removed = 0\n\n\t# avoid calling the same function inside the loop\n\tv_isrecursive = isrecursive(params)\n\n\tfor root, dirs, files in os.walk(toremovepath):\n\t\tif not v_isrecursive:\n\t\t\t#remove directories\n\t\t\twhile len(dirs) > 0:\n\t\t\t\tdirs.pop()\n\n\t\tfor d in dirs:\n\t\t\tdircompare = getNewPath(comparepath, d, root, toremovepath)\n\t\t\tif not os.path.exists(dircompare):\n\t\t\t\tremovedir = os.path.join(root, d)\n\t\t\t\t\n\t\t\t\t#total_removed += sum([len(files) for root, dirs, files in os.walk(removedir)])\n\t\t\t\t#count number of files inside a directory and LOG removed ones\n\t\t\t\tfor r, _dirs, files_r in os.walk(removedir):\n\t\t\t\t\ttotal_removed += len(files_r)\n\t\t\t\t\tfor f in files_r:\n\t\t\t\t\t\t#log using only the different part of the path\n\t\t\t\t\t\tlog('\\t\\tREMOVED: ' + r.replace(root, '') + '\\\\' + f)\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\t# usually this will avoid an exception\n\t\t\t\t\tos.chmod(removedir, stat.S_IWRITE)\n\n\t\t\t\t\tshutil.rmtree(removedir)\n\t\t\t\texcept:\n\t\t\t\t\tmsg = 'ERROR REMOVEDIFF: SHUTIL.RMTREE - FILE: ' + removedir\n\t\t\t\t\ton_error_log(msg, msg)\n\t\t\t\t\treturn total_removed\n\t\t\n\t\tfor f in files:\n\t\t\tfilecompare = getNewPath(comparepath, f, root, toremovepath)\n\t\t\tif not os.path.exists(filecompare):\n\t\t\t\tpathremove = os.path.join(root, f)\n\t\t\t\tos.chmod(pathremove, stat.S_IWRITE)\n\t\t\t\tos.remove(pathremove)\n\t\t\t\t\n\t\t\t\tsubpath = onlysubpath(initialroot=toremovepath, currentroot=root)\n\t\t\t\tlog('\\t\\tREMOVED: ' + subpath + '\\\\' + f)\n\t\t\t\ttotal_removed += 1\n\t\n\treturn total_removed\n
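\n# Flow note (summary of syncdir below): equal files are moved into place first,\n# then removediff() deletes what no longer exists in the source, and copydir()\n# copies the rest -- so a simple rename on the source never forces a full re-copy.\n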
\n#TODO: use named tuples?\ndef syncdir(original, tosync, params = []):\n\tif not os.path.exists(original):\n\t\treturn 0, 0, 0\n\n\t# try to move files first, if possible\n\t# if there are duplicate files inside different folders of the original path, this function will move the files and then copy the same files\n\ttotal_moved = 0\n\tif try_to_move(params):\n\t\ttotal_moved = move_equals(original, tosync)\n\t\t# no need to try to move the files once again\n\t\tdel params['try_to_move']\n\n\tif not os.path.exists(tosync):\n\t\ttotal_removed = 0\n\telse:\t\n\t\ttotal_removed = removediff(tosync, original, params)\n\t\n\tglobal ERROR_MSG\n\tif ERROR_MSG != '':\n\t\treturn total_removed, 0, total_moved\n\n\ttotal_copied, c_total_moved = copydir(original, tosync, params)\n\t# include the moves performed inside copydir in the total\n\treturn total_removed, total_copied, total_moved + c_total_moved\n\t\t\ndef log(text):\n\tglobal LOG\n\n\tdata = '[' + datetime.datetime.now().strftime('%d/%m/%Y') + ' ' + datetime.datetime.now().time().strftime('%H:%M:%S') + ']'\n\tLOG.write(data + ' ' + text + '\\n')\n\ndef log_endblock():\n\tlog(''.join(['-' for x in range(1,140)]))\n\ndef header_logs(operation, type='sync'):\n\tglobal DEVICELABELS\n\n\tlog('\\tSTART ' + type.upper() + ' - ' + operation['orig'] + ' TO ' + operation['dest'])\n\t# conditions in vars, better for readability\n\tcond1 = operation['orig_serial'] in DEVICELABELS\n\tcond2 = operation['dest_serial'] in DEVICELABELS\n\tif cond1 and cond2:\n\t\tlog('\\t' + DEVICELABELS[operation['orig_serial']] + ' -> ' + DEVICELABELS[operation['dest_serial']])\n\telse:\n\t\tlog('\\tNO DEVICE LABEL SPECIFIED')\n\n\tprint('Starting ' + type + ' --> ' + operation['orig'] + ' to ' + operation['dest'])\n\tsys.stdout.flush()\n\ndef on_error_log(log_text, error_text):\n\tglobal ERROR_MSG\n\tlog(log_text)\n\tlog_endblock()\n\tERROR_MSG = error_text\n\ndef get_serial_drive_map():\n\tserial_drive_map = {}\n\n\t# [:-1] remove the last null byte\n\tdrives = win32api.GetLogicalDriveStrings()[:-1].split('\\x00')\n\tfor d in drives:\n\t\ttry:\n\t\t\tname, serial_number, max_len_filename, flags, filesystem_name = win32api.GetVolumeInformation(d)\n\t\t\t# always get the positive number (2's complement). See: https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html\n\t\t\tserial_number = serial_number & 0xffffffff if serial_number < 0 else serial_number\n\t\t\t# d[:-1] remove the backslash\n\t\t\tserial_drive_map[serial_number] = d[:-1]\n\t\texcept:\n\t\t\t#ignoring 'The device is not ready.' errors. 
(CD/ROM drives, etc)\n\t\t\t#log maybe?\n\t\t\tpass\n\n\treturn serial_drive_map\t\n\ndef format_time_toprint(seconds):\n\thours = 0\n\tmins = 0\n\tsecs = seconds\n\tif secs > 59:\n\t\tmins = secs // 60\n\t\tsecs = secs % 60\n\n\t\tif mins > 59:\n\t\t\thours = mins // 60\n\t\t\tmins = mins % 60\n\n\ttime_str = str(hours)+'h ' + str(mins)+'m ' + str(secs)+'s'\n\n\treturn time_str\t\n\ndef get_operations():\n\tglobal OPERATIONS\n\tserial_drive_map = get_serial_drive_map()\n\t#print (json.dumps(serial_drive_map, indent=4))\n\n\tresult = []\n\t# skip operations whose drives are not plugged in\n\t# add the drive prefix on paths\n\tfor o in OPERATIONS:\n\t\tif o['orig_serial'] in serial_drive_map and o['dest_serial'] in serial_drive_map:\n\t\t\to['orig'] = serial_drive_map[o['orig_serial']] + o['orig']\n\t\t\to['dest'] = serial_drive_map[o['dest_serial']] + o['dest']\n\t\t\tresult.append(o)\n\n\treturn result\n\ndef main():\n\toperations = get_operations()\t\n\t#print(json.dumps(operations, indent=4))\n\t#return 0\n\n\t# time.clock() was removed in Python 3.8; perf_counter() is the replacement\n\tstart = time.perf_counter()\n\ttotal_operations = len(operations)\n\ttotal_moved = 0\n\ttotal_removed = 0\n\ttotal_copied = 0\n\tglobal ERROR_MSG\n\n\tlog('START BACKUP')\n\n\tprint ('Starting operations --> ' + str(total_operations) + ' pending')\n\tprint()\n\tsys.stdout.flush()\n\n\tfor a in operations:\n\t\tparams = []\n\t\tif 'params' in a:\n\t\t\tparams = a['params']\n\n\t\tif a['type'] == 'copy':\n\t\t\theader_logs(a, 'copy')\n\n\t\t\tstart_loop = time.perf_counter()\n\t\t\tc, m = copydir(a['orig'], a['dest'], params)\n\t\t\tif ERROR_MSG != '':\n\t\t\t\tprint ('Error: ' + ERROR_MSG)\n\t\t\t\treturn 0\n\n\t\t\ttotal_copied += c\n\t\t\ttotal_moved += m\n\t\t\tend_loop = time.perf_counter()\n\n\t\t\ttime_str = format_time_toprint(int(round(end_loop - start_loop)))\n\t\t\tprint ('Copy ended --> Time elapsed: ' + time_str)\n\t\t\tsys.stdout.flush()\n\n\t\t\tlog('\\t\\tFILES MOVED: ' + str(m))\n\t\t\tlog('\\t\\tFILES COPIED: ' + str(c))\n\t\t\tlog('\\t\\tTIME USED: ' + time_str)\n\t\t\tlog('\\tEND COPY - ' + a['orig'] + ' TO ' + a['dest'])\n\n\t\telif a['type'] == 'sync':\n\t\t\theader_logs(a, 'sync')\n\n\t\t\tstart_loop = time.perf_counter()\n\t\t\tr, c, m = syncdir(a['orig'], a['dest'], params)\n\t\t\tif ERROR_MSG != '':\n\t\t\t\tprint ('Error: ' + ERROR_MSG)\n\t\t\t\treturn 0\n\n\t\t\ttotal_moved += m\n\t\t\ttotal_removed += r\n\t\t\ttotal_copied += c\n\t\t\tend_loop = time.perf_counter()\n\n\t\t\ttime_str = format_time_toprint(int(round(end_loop - start_loop)))\n\t\t\tprint ('Sync ended --> time elapsed: ' + time_str)\n\t\t\tsys.stdout.flush()\n\n\t\t\tlog('\\t\\tFILES MOVED: ' + str(m))\n\t\t\tlog('\\t\\tFILES REMOVED: ' + str(r))\n\t\t\tlog('\\t\\tFILES COPIED: ' + str(c))\n\t\t\tlog('\\t\\tTIME USED: ' + time_str)\n\t\t\tlog('\\tEND SYNC - ' + a['orig'] + ' TO ' + a['dest'])\n\n\t\telse:\n\t\t\tprint('Unknown operation')\n\t\t\tsys.stdout.flush()\n\n\t\ttotal_operations -= 1\n\t\tprint('Finished --> ' + str(total_operations) + ' pending')\n\t\tprint()\n\t\tsys.stdout.flush()\n\t\t\t\n\tend = time.perf_counter()\n\n\ttotal_str = format_time_toprint(int(round(end - start)))\n\tprint('Total time: ' + total_str)\n\tsys.stdout.flush()\n\n\tlog('\\tTOTAL FILES MOVED: ' + str(total_moved))\n\tlog('\\tTOTAL FILES REMOVED: ' + str(total_removed))\n\tlog('\\tTOTAL FILES COPIED: ' + str(total_copied))\n\tlog('\\tTOTAL TIME USED: ' + total_str)\n\tlog('END BACKUP')\n\tlog_endblock()\n\nif __name__ == 
'__main__':\n\tmain()\n\tLOG.close()","repo_name":"thvesteves/dir-backup","sub_path":"dirbackup.py","file_name":"dirbackup.py","file_ext":"py","file_size_in_byte":19346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20821319323","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nclass ScratchLinearRegression():\n    \"\"\"\n    Scratch implementation of linear regression.\n\n    Parameters\n    ----------\n    num_iter : int\n        Number of iterations.\n    lr : float\n        Learning rate.\n    bias : bool\n        False to omit the bias term.\n    verbose : bool\n        True to print the training progress.\n\n    Attributes\n    ----------\n    self.coef_ : ndarray of shape (n_features,)\n        Parameters.\n    self.loss : ndarray of shape (self.iter,)\n        Recorded loss on the training data.\n    self.val_loss : ndarray of shape (self.iter,)\n        Recorded loss on the validation data.\n\n    \"\"\"\n\n    def __init__(self, num_iter=300, lr=0.01, bias=False, verbose=False, coef=False):\n        # store the hyperparameters as attributes\n        self.iter = num_iter\n        self.lr = lr\n        self.bias = bias\n        self.coef = coef\n        self.verbose = verbose\n        # arrays for recording the loss\n        self.loss = np.zeros(self.iter)\n        self.train_loss = np.zeros(self.iter)\n        self.val_loss = np.zeros(self.iter)\n\n\n    def _linear_hypothesis(self, X):\n        \"\"\"\n        Compute the linear hypothesis function.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Training data.\n\n        Returns\n        -------\n        ndarray of shape (n_samples, 1)\n            Estimates from the linear hypothesis function.\n\n        \"\"\"\n        return np.dot(X, self.coef)\n\n\n    def _compute_cost(self, X, y):\n        \"\"\"\n        Compute the mean squared error via the shared _MSE helper.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Training data.\n        y : ndarray of shape (n_samples, 1)\n            Target values.\n\n        Returns\n        -------\n        ndarray of shape (1,)\n            Mean squared error.\n        \"\"\"\n        y_pred = self._linear_hypothesis(X)\n        return self._MSE(y_pred, y)\n\n    def _MSE(self, y_pred, y):\n        \"\"\"\n        Compute the mean squared error.\n\n        Parameters\n        ----------\n        y_pred : ndarray of shape (n_samples,)\n            Estimated values.\n        y : ndarray of shape (n_samples,)\n            Target values.\n\n        Returns\n        ----------\n        mse : numpy.float\n            Mean squared error.\n        \"\"\"\n        m = len(y)\n        error = y_pred - y\n        total_error = np.sum(error**2)\n        J = total_error / (2*m)\n        return J\n\n\n    def _gradient_descent(self, X, y, X_val, y_val):\n        \"\"\"\n        Update the parameters with gradient descent.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Training data.\n        y : ndarray of shape (n_samples, 1)\n            Target values.\n        X_val, y_val : validation data and its target values.\n        \"\"\"\n        m = len(y)\n        \n        for i in range(self.iter):\n            h = self._linear_hypothesis(X)\n            error = h - np.reshape(y, (len(y),1))\n            self.coef = self.coef - (self.lr/m)*np.dot(X.T, error)\n            self.train_loss[i] = self._compute_cost(X, y)\n            self.val_loss[i] = self._compute_cost(X_val, y_val)\n
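\n    # The loop above is the vectorized batch update (X already carries the bias\n    # column): coef <- coef - (lr/m) * X.T @ (X @ coef - y), i.e. one gradient\n    # step on J(coef) = (1/(2m)) * sum((X @ coef - y)**2).\n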
\n    def fit(self, X, y, X_val, y_val):\n        \"\"\"\n        Train the linear regression model. When validation data is passed,\n        its loss is also computed at every iteration.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Features of the training data.\n        y : ndarray of shape (n_samples, )\n            Target values of the training data.\n        X_val : ndarray of shape (n_samples, n_features)\n            Features of the validation data.\n        y_val : ndarray of shape (n_samples, )\n            Target values of the validation data.\n        \"\"\"\n        X = np.insert(X, 0, 1, axis=1)\n        X_val = np.insert(X_val, 0, 1, axis=1)\n        self.coef = np.reshape(np.random.randn(X.shape[1]), (X.shape[1],1))\n\n        # estimate the parameters from the training data\n        self._gradient_descent(X, y, X_val, y_val)\n\n        if self.verbose:\n            # print the training progress when verbose is True\n            print('final train loss: {}, final val loss: {}'.format(self.train_loss[-1], self.val_loss[-1]))\n\n\n\n    def plot(self):\n        \"\"\"\n        Plot the recorded loss curves.\n        \"\"\"\n        plt.xlabel('iter', fontsize = 16)\n        plt.ylabel('loss', fontsize = 16)\n        plt.plot(range(self.iter), self.train_loss, label='train_loss')\n        plt.plot(range(self.iter), self.val_loss, label='val_loss')\n        plt.legend()\n\n\n    def predict(self, X):\n        \"\"\"\n        Predict using the trained linear regression model.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Samples.\n\n        Returns\n        -------\n        ndarray of shape (n_samples, 1)\n            Estimates from the linear regression model.\n        \"\"\"\n        # prepend the bias column, matching what fit() does\n        X = np.insert(X, 0, 1, axis=1)\n        return self._linear_hypothesis(X)","repo_name":"YasunoriKimura/diveintocode-ml","sub_path":"dic-term1/sprint3/utils/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"15458578357","text":"from matplotlib import pyplot as plt\n\n# function for line generation\ndef determineline(x0, y0, x1, y1, s):\n    print(f'slope = {(y1-y0)/(x1-x0)}')\n    if (y1-y0)/(x1-x0) > 1:\n        return drawline(y0, x0, y1, x1, True, False, s)\n    elif 0 < (y1-y0)/(x1-x0) < 1:\n        return drawline(x0, y0, x1, y1, False, False, s)\n    elif -1 < (y1-y0)/(x1-x0) < 0:\n        print((y1-y0)/(x1-x0))\n        return drawline(x0, -y0, x1, -y1, False, True, s)\n    else:\n        return drawline(-y0, x0, -y1, x1, True, True, s)\n\n# d = slope > 1\ndef drawline(x0, y0, x1, y1, d, neg, s):\n    verts = []\n    dx=abs(x1-x0)\n    dy=abs(y1-y0)\n    x=min(x0, x1)\n    y=min(y0, y1)\n    p=2*dy-dx\n\n    max_x = max(x0, x1)\n    print(f' start: {x}, {max_x}')\n\n    while x <= max_x:\n        if d:\n            nx = y\n            ny = x\n        else:\n            nx = x\n            ny = y\n        if neg:\n            ny = -ny\n        verts.append([nx, ny, s])\n\n        if p >= 0:\n            y=y+1\n            p=p+2*dy-2*dx\n        else:\n            p=p+2*dy\n        x=x+1\n\n    return verts\n\n\ntri_n = [\n    [46.00000000000001, 6],\n    [37, 8.999999999999998],\n    [43, 9]\n]\n\ntri = [ [ round(p[0]), round(p[1]) ] for p in tri_n ]\n\n\npoints = []\n\nprint(tri)\npoints = determineline(tri[0][0], tri[0][1], tri[1][0], tri[1][1], 'o')\n# points += determineline(tri[1][0], tri[1][1], tri[2][0], tri[2][1], 's')\n# points += determineline(tri[2][0], tri[2][1], tri[0][0], tri[0][1], '+')\n\nprint(points)\n\nmin_y = round(min(points, key=lambda tup: tup[1])[1])\nmax_y = round(max(points, key=lambda tup: tup[1])[1])\n\nprint(len(points), min_y, max_y)\n\n\nfor point in points:\n    plt.plot(point[0], point[1], marker=point[2], markersize=5, markeredgecolor=\"red\", markerfacecolor=\"green\")\n\npairs = []\n\nfor y in range(min_y-1, max_y+1):\n    pair = [y, 10000, 0]\n\n    for point in points:\n        if round(point[1]) == y:\n            if point[0] < pair[1]:\n                pair[1] = point[0]\n            if point[0] > pair[2]:\n                pair[2] = point[0]\n    pairs.append(pair)\n\nfor pair in pairs:\n    if pair[1] == 10000 or pair[2] == 0:\n        continue\n    plt.plot(pair[1], pair[0], marker=\"o\", markersize=5, markeredgecolor=\"blue\", markerfacecolor=\"green\")\n    plt.plot(pair[2], pair[0], marker=\"o\", markersize=5, markeredgecolor=\"blue\", markerfacecolor=\"green\")\n\nplt.plot((tri_n[0][0], tri_n[1][0]), (tri_n[0][1], tri_n[1][1]))\nplt.plot((tri_n[1][0], tri_n[2][0]), (tri_n[1][1], tri_n[2][1]))\nplt.plot((tri_n[2][0], tri_n[0][0]), (tri_n[2][1], tri_n[0][1]))\n\nplt.grid()\nplt.show()","repo_name":"JoeyShapiro/bay-leaf","sub_path":"rasterize.py","file_name":"rasterize.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21888057796","text":"from __future__ import absolute_import, print_function\n\nimport unittest\n\nfrom OPSI.Object import OpsiClient\nfrom OPSI.Util.Task.ConfigureBackend.ConfigurationData import createWANconfigs\n\nfrom .Backends.File import FileBackendBackendManagerMixin\n\n\nclass 
SimpleWanConfigTestCase(unittest.TestCase, FileBackendBackendManagerMixin):\n \"\"\"\n Testing the group actions.\n \"\"\"\n def setUp(self):\n self.setUpBackend()\n createWANconfigs(self.backend)\n\n def tearDown(self):\n self.tearDownBackend()\n\n def testEnablingSettingForOneHost(self):\n clientId = 'testclient.test.invalid'\n self.backend.host_createObjects(OpsiClient(id=clientId))\n\n self.backend.changeWANConfig(True, clientId)\n self.assertTrue(self.clientHasWANEnabled(clientId))\n\n self.backend.changeWANConfig(False, clientId)\n self.assertFalse(self.clientHasWANEnabled(clientId))\n\n def clientHasWANEnabled(self, clientId):\n configsToCheck = set([\n \"opsiclientd.event_gui_startup.active\",\n \"opsiclientd.event_gui_startup{user_logged_in}.active\",\n \"opsiclientd.event_net_connection.active\",\n \"opsiclientd.event_timer.active\"\n ])\n\n for configState in self.backend.configState_getObjects(objectId=clientId):\n if configState.configId == u\"opsiclientd.event_gui_startup.active\":\n if configState.values[0]:\n return False\n configsToCheck.remove(u\"opsiclientd.event_gui_startup.active\")\n elif configState.configId == u\"opsiclientd.event_gui_startup{user_logged_in}.active\":\n if configState.values[0]:\n return False\n configsToCheck.remove(u\"opsiclientd.event_gui_startup{user_logged_in}.active\")\n elif configState.configId == u\"opsiclientd.event_net_connection.active\":\n if not configState.values[0]:\n return False\n configsToCheck.remove(u\"opsiclientd.event_net_connection.active\")\n elif configState.configId == u\"opsiclientd.event_timer.active\":\n if not configState.values[0]:\n return False\n configsToCheck.remove(u\"opsiclientd.event_timer.active\")\n\n if configsToCheck:\n print(\"The following configs were not set: {0}\".format(configsToCheck))\n return False\n\n return True\n\n def testEnablingSettingForMultipleHosts(self):\n clientIds = ['testclient{0}.test.invalid'.format(num) for num in range(10)]\n self.backend.host_createObjects([OpsiClient(id=clientId) for clientId in clientIds])\n\n self.backend.changeWANConfig(True, clientIds)\n\n for clientId in clientIds:\n self.assertTrue(self.clientHasWANEnabled(clientId))\n\n def testNotProcessingEmptyList(self):\n self.backend.changeWANConfig(True, [])\n\n def testNotChangingUnreferencedClient(self):\n clientIds = ['testclient{0}.test.invalid'.format(num) for num in range(10)]\n singleClient = 'testclient99.test.invalid'\n self.backend.host_createObjects([OpsiClient(id=clientId) for clientId in clientIds])\n self.backend.host_createObjects([OpsiClient(id=singleClient)])\n\n self.backend.changeWANConfig(True, clientIds)\n self.backend.changeWANConfig(True, [])\n\n for clientId in clientIds:\n self.assertTrue(self.clientHasWANEnabled(clientId))\n\n self.assertFalse(self.clientHasWANEnabled(singleClient))\n\n def testUsingNonBooleanParameters(self):\n client = OpsiClient(id='testclient101.test.invalid')\n\n self.backend.host_createObjects([client])\n\n self.backend.changeWANConfig(False, client.id)\n for term in (\"on\", \"1\", \"true\"):\n self.backend.changeWANConfig(term, client.id)\n self.assertTrue(self.clientHasWANEnabled(client.id))\n self.backend.changeWANConfig(False, client.id)\n\n self.backend.changeWANConfig(True, client.id)\n for term in (\"off\", \"false\", \"0\"):\n self.backend.changeWANConfig(term, client.id)\n self.assertFalse(self.clientHasWANEnabled(client.id))\n self.backend.changeWANConfig(True, client.id)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"kochd/python-opsi","sub_path":"tests/test_backend_extend_d_70_wan.py","file_name":"test_backend_extend_d_70_wan.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14128698915","text":"# seasonal.py\n# Lisa Thoms\n# Final Project\n\n# these are the functions seen in my midterm, fixed and one of the menu options.\n\nimport random\n\ndef seasons():\n    print('\\nWhat season is it? Spring, Summer, Fall, Winter?')\n    \n    while True:\n        curSeason = input() #getting season input\n        # random index between 0 and 6 for the color grab\n        randomColor = random.randint(0,6)\n        if curSeason.lower() == 'spring':\n            spring(randomColor)\n            break\n        elif curSeason.lower() == 'summer':\n            summer(randomColor)\n            break\n        elif curSeason.lower() == 'fall':\n            fall(randomColor)\n            break\n        elif curSeason.lower() == 'winter':\n            winter(randomColor)\n            break\n        else:\n            print('\\nPlease enter a valid season. Spring, Summer, Fall, Winter')\n            continue\n\n#creating the color lists for the seasons\n#each list has 7 colors\nspringColors = ['peach', 'light pink', 'mint green', 'baby blue', 'nude', 'soft yellow', 'lavender']\nsummerColors = ['white', 'yellow', 'bright red', 'orange', 'fuchsia', 'turquoise', 'royal blue']\nfallColors = ['brown', 'mustard yellow', 'burnt orange', 'hunter green', 'mauve', 'maroon', 'black']\nwinterColors = ['white', 'black', 'dark grey', 'ruby red', 'dark purple', 'emerald green', 'dark blue']\n\n# creating the season type functions, using the randomColor int to pick from the lists\ndef spring(randomColor): #spring\n    # generate the random color from the list\n    print('\\nThe color you should pick is ' + springColors[randomColor] + '.')\n    topper()\n    \ndef summer(randomColor): #summer\n    # generate the random color from the list\n    print('\\nThe color you should pick is ' + summerColors[randomColor] + '.')\n    topper()\n\ndef fall(randomColor): #fall\n    # generate the random color from the list\n    print('\\nThe color you should pick is ' + fallColors[randomColor] + '.')\n    topper()\n    \ndef winter(randomColor): #winter\n    # generate the random color from the list\n    print('\\nThe color you should pick is ' + winterColors[randomColor] + '.')\n    topper()\n\n#creating the topper list\ntoppers = ['holographic', 'flakies', 'glitter', 'microshimmer']\n\ndef topper(): #topper function (this is what makes a nail polish pretty!)\n    print('\\nDo you want a nail polish topper?')\n    while True:\n        answer = input() #getting input (a separate name, so it does not shadow this function)\n        # random index between 0 and 3 for the topper grab\n        randomTop = random.randint(0,3)\n        if answer.upper() == 'N' or answer.upper() == 'NO':\n            print('A clear glossy top coat it is!')\n            break\n        elif answer.upper() == 'Y' or answer.upper() == 'YES':\n            print('The topper you should use is ' + toppers[randomTop] + '.')\n            break\n        else:\n            print('\\nPlease enter a valid input. Y or N.')\n            continue\n\n    print('\\nDo you want a different combination? 
Y or N: ')\n    # while loop to go back through game until user selects no\n    # then loop to main menu\n    while True:\n        newCombo = input()\n        if newCombo.upper() == 'N' or newCombo.upper() == 'NO':\n            # exit minigame\n            break\n        elif newCombo.upper() == 'Y' or newCombo.upper() == 'YES':\n            print('\\nLet\\'s try again.')\n            seasons() # going back to num picker\n            break\n        else:\n            print('\\nPlease enter a valid input. Y or N.')\n            # proceed back to start to enter correct input\n            continue\n    \n","repo_name":"lbthoms/nailpolish","sub_path":"seasonal.py","file_name":"seasonal.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"74931505421","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> Optional[ListNode]:\n        def getLen(node):\n            count = 0\n            while node:\n                count += 1\n                node = node.next\n            \n            return count\n        \n        l1,l2 = getLen(headA),getLen(headB)\n        longer = headA if l1 > l2 else headB\n        shorter = headB if l1 > l2 else headA\n        \n        for _ in range(abs(l2 - l1)):\n            longer = longer.next\n        \n
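        # Illustrative walk-through: with lengths 5 and 3, the longer list's\n        # pointer advances 2 nodes first; both pointers then reach the shared\n        # node (or None) after the same number of steps.\n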
        while shorter != longer:\n            shorter = shorter.next\n            longer = longer.next\n        \n        return shorter","repo_name":"LibenHailu/interview-prep","sub_path":"squid-game/round4/160. Intersection of Two Linked Lists.py","file_name":"160. Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"32283667626","text":"import random\r\nimport time\r\nfrom util import util\r\n\r\nNCARDS = 8\r\nINIT_SCORE = 100\r\nSUIT_TUPLE = ('Clubs', 'Diamonds', 'Hearts', 'Spades') # suits ordered by value (later entries rank higher)\r\nRANK_TUPLE = ('Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King')\r\n\r\ndef createDeck():\r\n    deck = []\r\n    for suit in SUIT_TUPLE:\r\n        for value, rank in enumerate(RANK_TUPLE):\r\n            card = {'suit': suit, 'rank': rank, 'value': value+1}\r\n            deck.append(card)\r\n    return deck\r\n\r\ndef shuffleCards(deck):\r\n    random.shuffle(deck)\r\n    return deck\r\n\r\ndef getCards(deck, numCards):\r\n    cardList = []\r\n    for i in range(numCards):\r\n        card = deck.pop() # remove one card from the end\r\n        cardList.append(card)\r\n    return cardList\r\n\r\n# print the card list\r\ndef printCardList(cardList):\r\n    for i in range(len(cardList)):\r\n        print(f\"{i+1}. {cardList[i]['suit']} {cardList[i]['rank']}\")\r\n\r\ndef game1(score):\r\n\r\n    print(\">>> Starting the Higher or Lower game (1).\")\r\n    print(f\">>> Your current score is {score} points.\")\r\n\r\n    deck = createDeck()\r\n    print(\">>> Shuffling the cards.\")\r\n    deck = shuffleCards(deck)\r\n    time.sleep(1)\r\n\r\n    print(f\">>> You receive {NCARDS} cards.\")\r\n    myCardList = getCards(deck, NCARDS)\r\n    time.sleep(1)\r\n\r\n    curCard = myCardList.pop()\r\n    print(f\">>> The first card is {curCard['suit']}, {curCard['rank']}.\")\r\n\r\n    for i in range(NCARDS-1):\r\n        while True:\r\n            choice = input(\"Guess whether the next card is higher ('h') or lower ('l'): \")\r\n            if choice in \"hHlL\":\r\n                choice = choice.lower()\r\n                break\r\n            else:\r\n                print(\">>> [ERROR] Please enter either 'h' or 'l'!!!\")\r\n\r\n        chStr = 'lower' if choice == 'l' else 'higher'\r\n        print(f\">>> You chose {chStr}.\")\r\n        print(\">>> Now checking the next card.\")\r\n        time.sleep(1)\r\n\r\n        nextCard = myCardList.pop()\r\n        print(f\">>> The next card is {nextCard['suit']}, {nextCard['rank']}.\")\r\n\r\n        if (choice == 'l' and nextCard['value'] < curCard['value']) or \\\r\n           (choice == 'h' and nextCard['value'] > curCard['value']):\r\n            print(\">>> Your guess was right! You gain 20 points.\")\r\n            score += 20\r\n        else:\r\n            print(\">>> Your guess was wrong! You lose 15 points.\")\r\n            score -= 15\r\n\r\n        curCard = nextCard\r\n\r\n        if i < NCARDS-2:\r\n            print(f\">>> Your current score is {score} points.\")\r\n            time.sleep(1)\r\n        else:\r\n            print(f\">>> The game is over. Your final score is {score} points.\")\r\n\r\n    return score\r\n\r\ndef game2(score):\r\n\r\n    print(\">>> Starting the Higher or Lower game (2).\")\r\n    print(f\">>> Your current score is {score} points.\")\r\n\r\n    deck = createDeck()\r\n    print(\">>> Shuffling the cards.\")\r\n    deck = shuffleCards(deck)\r\n    time.sleep(1)\r\n\r\n    print(f\">>> Dealing {NCARDS} cards to you.\")\r\n    myCardList = getCards(deck, NCARDS)\r\n    time.sleep(1)\r\n\r\n    print(f\">>> Dealing {NCARDS} cards to the dealer.\")\r\n    dealerCardList = getCards(deck, NCARDS)\r\n    time.sleep(1)\r\n\r\n    for i in range(NCARDS):\r\n\r\n        print(\">>> Your current cards are as follows.\")\r\n        printCardList(myCardList)\r\n\r\n        while True:\r\n\r\n            if len(myCardList) >= 2:\r\n                choice = input(\"Enter the number of the card to play: \")\r\n                if choice.isdigit() and 1 <= int(choice) <= len(myCardList):\r\n                    choice = int(choice)\r\n                    break\r\n                else:\r\n                    print(\">>> [ERROR] Please enter a number from the list!!!\")\r\n            else:\r\n                choice = 1\r\n                break\r\n\r\n        myCard = myCardList.pop(choice-1)\r\n        print(f\">>> Your card is {myCard['suit']}, {myCard['rank']}.\")\r\n\r\n        dealerCard = dealerCardList.pop()\r\n        print(f\">>> The dealer's card is {dealerCard['suit']}, {dealerCard['rank']}.\")\r\n\r\n        time.sleep(1)\r\n\r\n        if myCard['value'] > dealerCard['value']:\r\n            print(\">>> Your card outranks the dealer's card! You gain 20 points.\")\r\n            score += 20\r\n        elif myCard['value'] < dealerCard['value']:\r\n            print(\">>> Your card ranks below the dealer's card! You lose 20 points.\")\r\n            score -= 20\r\n        elif SUIT_TUPLE.index(myCard['suit']) > SUIT_TUPLE.index(dealerCard['suit']):\r\n            print(\">>> Same rank, but your card wins on suit! You gain 20 points.\")\r\n            score += 20\r\n        else:\r\n            print(\">>> Same rank, but your card loses on suit! You lose 20 points.\")\r\n            score -= 20\r\n\r\n        if i < NCARDS-1:\r\n            print(f\">>> Your current score is {score} points.\")\r\n            time.sleep(1)\r\n        else:\r\n            print(f\">>> The game is over. Your final score is {score} points.\")
\r\n\r\n    return score\r\n\r\ndef main():\r\n    util.print_header(\"2-2 Higher or Lower\", \"2022.09.27\", \"(c) Lee, Sang-gwon\")\r\n\r\n    score = INIT_SCORE\r\n\r\n    while True:\r\n        game_num = util.int_get('>>> Choose a game mode (1. solo game, 2. versus dealer, 3. quit):', minval=1, maxval=3, default=1)\r\n\r\n        if game_num == 1:\r\n            score = game1(score)\r\n        elif game_num == 2:\r\n            score = game2(score)\r\n        else:\r\n            print(f\">>> Your final score is {score} points. Quitting the game.\\n\")\r\n            break\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"stu0430/PythonWorkSpace","sub_path":"AI Programming Exercises/higher_or_lower.py","file_name":"higher_or_lower.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"12918469","text":"# this example runs a controller for pendulum environments.\n# the controller is fairly robust for single and double pendulum, but\n# not quite so for triple pendulum.\nfrom pendulum import *\n\nprogram = \"pend2\"\nif program == \"pend1\":\n    env = Pendulum1Env(x_0=[np.pi * 0.9, -3.], dt=0.1)\n    env.reset()\n    control = ControllerSinglePendulum(env)\n    n_step_lqr, n_step_ilqr = 50, 25\n    Q = np.eye(2, 2)\n    Q[0, 0] = 5\n    Qf = np.eye(2, 2) * 1000\n    R = np.eye(1, 1)\n    x_goal = [0., 0.]\n    ilqr_actions = control.run_ilqr(Q, R, Qf, x_goal, n_step_ilqr)\n    lqr_actions = control.run_lqr(Q, R, x_goal, n_step_lqr, [0.])\n    print(env.x)\n    env.render()\nelif program == \"pend2\":\n    env = Pendulum2Env(x_0=[np.pi * 0.5, np.pi * 0.1, 1., 0.], dt=0.025)\n    env.reset()\n    control = ControllerDoublePendulum(env)\n    n_step_lqr, n_step_ilqr = 50, 50\n    Q = np.eye(4, 4)\n    Q[1, 1] = 0\n    Q[2, 2] = 0\n    Qf = np.eye(4, 4) * 1000\n    R = np.eye(2, 2)\n    x_goal = [0., 0., 0., 0.]\n    ilqr_actions = control.run_ilqr(Q, R, Qf, x_goal, n_step_ilqr)\n    lqr_actions = control.run_lqr(Q, R, x_goal, n_step_lqr, ilqr_actions[-1])\n    print(env.x)\n    env.render()\nelif program == \"pend3\":\n    x_0 = [1, 2, 2., 0, 0, 0]\n    env = Pendulum3Env(dt=0.02, x_0=x_0)\n    env.reset()\n\n    n_step_lqr, n_step_ilqr = 200, 50\n    Q = np.eye(6, 6) *10\n    Qf = np.eye(6, 6) * 1000\n    R = np.eye(3, 3)\n    x_goal = [0., 0., 0., 0., 0., 0.]\n\n    x_mid = [1.5, 1., 1.5, 1., 1., 1.]\n    x_mid2 = [0.5, 0.5, 0.5, 0.0, 0.0, -0.]\n\n    u_init = np.zeros((3, n_step_ilqr))\n    u_init[0, :n_step_ilqr] = np.linspace(1, 0, n_step_ilqr)\n    control = ControllerTriplePendulum(env, use_sympy=False)\n    ilqr_actions_1 = control.run_ilqr(Q, R, Qf, x_mid, n_step_ilqr, u_init=u_init)\n\n    env.x_0 = env.x\n    ilqr_actions_2 = control.run_ilqr(Q, R, Qf, x_mid2, n_step_ilqr)#, u_init=u_init*0.2)\n\n    env.x_0 = env.x\n    ilqr_actions_3 = control.run_ilqr(Q, R, Qf, x_goal, n_step_ilqr) # , u_init=u_init*0.2)\n\n    #try:\n    lqr_actions = control.run_lqr(Q, R, x_goal, n_step_lqr, ilqr_actions_3[-1], goal_action=[0]*3)\n    # except:\n    #     lqr_actions = [np.zeros(3)]*n_step_lqr\n\n    env.x_0 = x_0\n    env.reset()\n\n    all_actions = ilqr_actions_1 + ilqr_actions_2 + ilqr_actions_3 + lqr_actions\n    for i in range(n_step_ilqr + n_step_ilqr + n_step_ilqr + n_step_lqr):\n        env.x, _, _, _ = env.step(all_actions[i])\n        env.render()\n    env.animate(file_name=\"pend3_upright.gif\", dpi=90)","repo_name":"amaleki2/benchmark_closedloop_verification","sub_path":"example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"14650201141","text":"from dataclasses import dataclass\nfrom typing import Any, List, TypeVar, Callable, Type, cast\nimport uuid\n\n\nT = TypeVar(\"T\")\n
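\n# These runtime-checked converters follow the quicktype-style codegen pattern:\n# each from_*() helper asserts the JSON value's type before returning it, so a\n# malformed payload fails fast. Round-trip sketch (hypothetical data):\n#   c = Customer.from_dict(raw)\n#   assert Customer.from_dict(c.to_dict()) == c\n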
import Any, List, TypeVar, Callable, Type, cast\nimport uuid\n\n\nT = TypeVar(\"T\")\n\n\ndef from_uuid(x: Any) -> uuid.UUID:\n    assert isinstance(x, uuid.UUID) and not isinstance(x, str)\n    return x\n\n\ndef from_bool(x: Any) -> bool:\n    assert isinstance(x, bool)\n    return x\n\n\ndef from_int(x: Any) -> int:\n    assert isinstance(x, int) and not isinstance(x, bool)\n    return x\n\n\ndef from_str(x: Any) -> str:\n    assert isinstance(x, str)\n    return x\n\n\ndef from_list(f: Callable[[Any], T], x: Any) -> List[T]:\n    assert isinstance(x, list)\n    return [f(y) for y in x]\n\n\ndef to_class(c: Type[T], x: Any) -> dict:\n    assert isinstance(x, c)\n    return cast(Any, x).to_dict()\n\n\n@dataclass\nclass Customer:\n    id: uuid.UUID\n    year_birth: int\n    education: str\n    marital_status: str\n    income: int\n    kidhome: int\n    teenhome: int\n    dt_customer: str\n    recency: int\n    mnt_wines: int\n    mnt_fruits: int\n    mnt_meat_products: int\n    mnt_fish_products: int\n    mnt_sweet_products: int\n    mnt_gold_prods: int\n    num_deals_purchases: int\n    num_web_purchases: int\n    num_catalog_purchases: int\n    num_store_purchases: int\n    num_web_visits_month: int\n    accepted_cmp3: bool\n    accepted_cmp4: bool\n    accepted_cmp5: bool\n    accepted_cmp1: bool\n    accepted_cmp2: bool\n    complain: bool\n    z_cost_contact: int\n    z_revenue: int\n    response: bool\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Customer':\n        assert isinstance(obj, dict)\n        id = from_uuid(obj.get(\"ID\"))\n        year_birth = from_int(obj.get(\"Year_Birth\"))\n        education = from_str(obj.get(\"Education\"))\n        marital_status = from_str(obj.get(\"Marital_Status\"))\n        income = from_int(obj.get(\"Income\"))\n        kidhome = from_int(obj.get(\"Kidhome\"))\n        teenhome = from_int(obj.get(\"Teenhome\"))\n        dt_customer = from_str(obj.get(\"Dt_Customer\"))\n        recency = from_int(obj.get(\"Recency\"))\n        mnt_wines = from_int(obj.get(\"MntWines\"))\n        mnt_fruits = from_int(obj.get(\"MntFruits\"))\n        mnt_meat_products = from_int(obj.get(\"MntMeatProducts\"))\n        mnt_fish_products = from_int(obj.get(\"MntFishProducts\"))\n        mnt_sweet_products = from_int(obj.get(\"MntSweetProducts\"))\n        mnt_gold_prods = from_int(obj.get(\"MntGoldProds\"))\n        num_deals_purchases = from_int(obj.get(\"NumDealsPurchases\"))\n        num_web_purchases = from_int(obj.get(\"NumWebPurchases\"))\n        num_catalog_purchases = from_int(obj.get(\"NumCatalogPurchases\"))\n        num_store_purchases = from_int(obj.get(\"NumStorePurchases\"))\n        num_web_visits_month = from_int(obj.get(\"NumWebVisitsMonth\"))\n        accepted_cmp3 = from_bool(obj.get(\"AcceptedCmp3\"))\n        accepted_cmp4 = from_bool(obj.get(\"AcceptedCmp4\"))\n        accepted_cmp5 = from_bool(obj.get(\"AcceptedCmp5\"))\n        accepted_cmp1 = from_bool(obj.get(\"AcceptedCmp1\"))\n        accepted_cmp2 = from_bool(obj.get(\"AcceptedCmp2\"))\n        complain = from_bool(obj.get(\"Complain\"))\n        z_cost_contact = from_int(obj.get(\"Z_CostContact\"))\n        z_revenue = from_int(obj.get(\"Z_Revenue\"))\n        response = from_bool(obj.get(\"Response\"))\n        return Customer(id, year_birth, education, marital_status, income, kidhome, teenhome, dt_customer, recency, mnt_wines, mnt_fruits, mnt_meat_products, mnt_fish_products, mnt_sweet_products, mnt_gold_prods, num_deals_purchases, num_web_purchases, num_catalog_purchases, num_store_purchases, num_web_visits_month, accepted_cmp3, accepted_cmp4, accepted_cmp5, accepted_cmp1, accepted_cmp2, complain, z_cost_contact, z_revenue, response)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"ID\"] = from_uuid(self.id)\n        result[\"Year_Birth\"] = from_int(self.year_birth)\n        
result[\"Education\"] = from_str(self.education)\n result[\"Marital_Status\"] = from_str(self.marital_status)\n result[\"Income\"] = from_int(self.income)\n result[\"Kidhome\"] = from_int(self.kidhome)\n result[\"Teenhome\"] = from_int(self.teenhome)\n result[\"Dt_Customer\"] = from_str(self.dt_customer)\n result[\"Recency\"] = from_int(self.recency)\n result[\"MntWines\"] = from_int(self.mnt_wines)\n result[\"MntFruits\"] = from_int(self.mnt_fruits)\n result[\"MntMeatProducts\"] = from_int(self.mnt_meat_products)\n result[\"MntFishProducts\"] = from_int(self.mnt_fish_products)\n result[\"MntSweetProducts\"] = from_int(self.mnt_sweet_products)\n result[\"MntGoldProds\"] = from_int(self.mnt_gold_prods)\n result[\"NumDealsPurchases\"] = from_int(self.num_deals_purchases)\n result[\"NumWebPurchases\"] = from_int(self.num_web_purchases)\n result[\"NumCatalogPurchases\"] = from_int(self.num_catalog_purchases)\n result[\"NumStorePurchases\"] = from_int(self.num_store_purchases)\n result[\"NumWebVisitsMonth\"] = from_int(self.num_web_visits_month)\n result[\"AcceptedCmp3\"] = from_bool(self.accepted_cmp3)\n result[\"AcceptedCmp4\"] = from_bool(self.accepted_cmp4)\n result[\"AcceptedCmp5\"] = from_bool(self.accepted_cmp5)\n result[\"AcceptedCmp1\"] = from_bool(self.accepted_cmp1)\n result[\"AcceptedCmp2\"] = from_bool(self.accepted_cmp2)\n result[\"Complain\"] = from_bool(self.complain)\n result[\"Z_CostContact\"] = from_int(self.z_cost_contact)\n result[\"Z_Revenue\"] = from_int(self.z_revenue)\n result[\"Response\"] = from_bool(self.response)\n return result\n\n\ndef customer_from_dict(s: Any) -> List[Customer]:\n return from_list(Customer.from_dict, s)\n\n\ndef customer_to_dict(x: List[Customer]) -> Any:\n return from_list(lambda x: to_class(Customer, x), x)\n","repo_name":"mikejmz24/CSV_Dataset_Generator","sub_path":"customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71364828944","text":"from collections import deque\ninput=__import__('sys').stdin.readline\nMIS=lambda:map(int,input().rstrip().split())\nn,m,t=MIS();board=[[]];order=[]\ndx=[-1,0,1,0];dy=[0,-1,0,1]\nfor _ in range(n):\n board.append(deque(MIS()))\nfor _ in range(t):\n order.append(list(MIS()))\n\nfor o_x,d,k in order:\n idx=0\n if d==0: # 시계 방향\n for row in range(o_x,len(board),o_x):\n board[row].rotate(k);idx=row\n \n else: # 반시계 방향\n for row in range(o_x,len(board),o_x):\n board[row].rotate(-k);idx=row\n\n pos=[];value=0;cnt=0\n for x in range(1,len(board)):\n for y in range(len(board[x])):\n if board[x][y]!=0:\n value+=board[x][y];cnt+=1\n for i in range(4):\n nx=x+dx[i]\n ny=(y+dy[i])%m\n if 1<=nx<=n and 0<=nyavg: board[i][j]-=1\n\nresult=0\nfor i in range(1,len(board)):\n for j in range(len(board[i])):\n if board[i][j]!=0: result+=board[i][j]\nprint(result)","repo_name":"CodeTest-StudyGroup/Code-Test-Study","sub_path":"wan2good/백준/12주차_원판돌리기.py","file_name":"12주차_원판돌리기.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1095,"dataset":"github-code","pt":"47"} +{"seq_id":"33093543172","text":"def factorial(n):\n\tfact = 1\n\tif n < 0 :\n\t\tprint(\"Number should be greater than 0..\")\n\telif n == 0 :\n\t\tprint(\"Factorial of 0 is 1..\")\n\telse:\n\t\tfor i in range(1,n + 1):\n\t\t\tfact = fact * i\n\t\t\ti = i + 1\n\t\tprint(\"Factorial is\",fact)\n\nn = int(input(\"enter number : 
\"))\nfactorial(n)\n","repo_name":"Preksha1998/python","sub_path":"functions/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37726368764","text":"import os\r\n\r\nimport pytest\r\nfrom selenium import webdriver\r\nfrom utils.configs.config import Config\r\n\r\n\r\n@pytest.fixture(autouse=True)\r\ndef run_around_tests():\r\n print(\"\\nTest başladı\\n\")\r\n yield\r\n print(\"\\nTest Tamamlandı\\n\")\r\n\r\n\r\n@pytest.fixture()\r\ndef setup():\r\n project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\r\n driver_path = os.path.join(project_root, 'utils', 'drivers', 'chromedriver')\r\n\r\n browser_name = Config.get_driver_name()\r\n implicit_wait_time = Config.get_implicity_wait()\r\n options = get_browser_options()\r\n\r\n if browser_name.lower() == \"chrome\":\r\n driver = webdriver.Chrome(options=options, executable_path=driver_path)\r\n elif browser_name.lower() == \"firefox\":\r\n driver = webdriver.Firefox(options=options, executable_path=driver_path)\r\n elif browser_name.lower() == \"edge\":\r\n driver = webdriver.Edge(executable_path=driver_path)\r\n else:\r\n raise Exception(\"Invalid browser name provided!\")\r\n\r\n driver.implicitly_wait(implicit_wait_time)\r\n return driver\r\n\r\n\r\ndef get_browser_options():\r\n options = webdriver.ChromeOptions()\r\n options.add_argument(\"--start-maximized\")\r\n options.add_argument(\"--incognito\")\r\n options.add_argument(\"--disable-blink-features=AutomationControlled\")\r\n prefs = {\"profile.default_content_setting_values.notifications\": 1}\r\n options.add_experimental_option(\"prefs\", prefs)\r\n return options","repo_name":"ssanemkaraa/PytestUIAutomationStructure","sub_path":"testCases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"5156639695","text":"import sys\r\nimport os\r\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\r\nfrom langchain.vectorstores import FAISS\r\nfrom langchain.retrievers import SVMRetriever\r\nfrom langchain.chains import RetrievalQA\r\nfrom langchain.chat_models import ChatOpenAI\r\nfrom langchain.embeddings.openai import OpenAIEmbeddings\r\nfrom langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\r\nfrom langchain.callbacks.base import CallbackManager\r\n\r\nretriever_type = \"SIMILARITY SEARCH\"\r\n# Use RecursiveCharacterTextSplitter as the default and only text splitter\r\nsplitter_type = \"RecursiveCharacterTextSplitter\"\r\n\r\ndef create_retriever(_embeddings, splits, retriever_type):\r\n if retriever_type == \"SIMILARITY SEARCH\":\r\n try:\r\n vectorstore = FAISS.from_texts(splits, _embeddings)\r\n except (IndexError, ValueError) as e:\r\n print(f\"Error creating vectorstore: {e}\")\r\n return\r\n retriever = vectorstore.as_retriever(k=5)\r\n elif retriever_type == \"SUPPORT VECTOR MACHINES\":\r\n retriever = SVMRetriever.from_texts(splits, _embeddings)\r\n\r\n return retriever\r\n\r\ndef split_texts(text, chunk_size, overlap, split_method):\r\n\r\n # Split texts\r\n # IN: text, chunk size, overlap, split_method\r\n # OUT: list of str splits\r\n\r\n split_method = \"RecursiveTextSplitter\"\r\n text_splitter = RecursiveCharacterTextSplitter(\r\n chunk_size=chunk_size, chunk_overlap=overlap)\r\n\r\n splits = text_splitter.split_text(text)\r\n if not 
splits:\r\n print(\"Failed to split document\")\r\n\r\n return splits\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) == 3:\r\n os.environ[\"OPENAI_API_KEY\"] = os.environ[\"TOKEN_OPENAI_CHATGPT\"]\r\n user_question_file = sys.argv[1]\r\n content_file = sys.argv[2]\r\n # Load and process the uploaded PDF or TXT files.\r\n with open(user_question_file, \"r\") as archivo:\r\n user_question = archivo.read()\r\n\r\n # Load and process the uploaded PDF or TXT files.\r\n with open(content_file, \"r\") as archivo:\r\n loaded_text = archivo.read()\r\n \r\n # Split the document into chunks\r\n splits = split_texts(loaded_text, chunk_size=1000,\r\n overlap=0, split_method=splitter_type)\r\n\r\n\r\n embeddings = OpenAIEmbeddings()\r\n retriever = create_retriever(embeddings, splits, retriever_type)\r\n # Initialize the RetrievalQA chain with streaming output\r\n callback_handler = StreamingStdOutCallbackHandler()\r\n callback_manager = CallbackManager([callback_handler])\r\n\r\n# TURBO = \"gpt-3.5-turbo\"\r\n# GPT4 = \"gpt-4\"\r\n# CLAUDE = \"claude-v1\"\r\n# CLAUDE_INSTANT = \"claude-instant-v1\"\r\n# WINDOW = \"window\"\r\n# model_name=\"gpt-3.5-turbo\",\r\n# model_name=\"text-curie-001\" \r\n chat_openai = ChatOpenAI(\r\n model_name=\"gpt-3.5-turbo\",\r\n streaming=True, callback_manager=callback_manager, verbose=True, temperature=0)\r\n qa = RetrievalQA.from_chain_type(llm=chat_openai, retriever=retriever, chain_type=\"stuff\", verbose=True)\r\n \r\n answer = qa.run(user_question)\r\n print(\"Answer:\", answer)\r\n\r\n\r\n","repo_name":"joenvihe/scraper_news","sub_path":"test_langchain.py","file_name":"test_langchain.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71030390543","text":"import itertools\nfrom dataclasses import dataclass\n\nimport numpy as np\n\nfrom autodidaqt import AutodiDAQt, Experiment\nfrom autodidaqt.mock import MockMotionController, MockScalarDetector\n\n\n@dataclass\nclass XScan:\n n_points_x: int = 20\n n_points_y: int = 20\n\n def sequence(self, experiment, mc, power_meter):\n experiment.plot(\n dependent=\"power_meter.device\",\n independent=[\"mc.stages[0]\"],\n name=\"Line Plot\",\n )\n experiment.plot(\n dependent=\"power_meter.device\",\n independent=[\"mc.stages[0]\", \"mc.stages[1]\"],\n name=\"Power\",\n size=lambda value: np.abs(value),\n )\n\n for x, y in itertools.product(range(self.n_points_x), (range(self.n_points_y))):\n with experiment.point():\n yield [mc.stages[0].write(x), mc.stages[1].write(y)]\n yield [power_meter.device.read()]\n\n\nclass MyExperiment(Experiment):\n scan_methods = [XScan]\n\n\napp = AutodiDAQt(\n __name__,\n {},\n dict(experiment=MyExperiment),\n dict(mc=MockMotionController, power_meter=MockScalarDetector),\n)\n\nif __name__ == \"__main__\":\n app.start()\n","repo_name":"chstan/autodiDAQt","sub_path":"autodidaqt/examples/scanning_custom_plots.py","file_name":"scanning_custom_plots.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"2064438902","text":"'''\nLeetcode easy - stack \nhttps://leetcode.com/problems/valid-parentheses/submissions/963272877/\n'''\n\nclass Solution:\n def isValid(self, s: str) -> bool:\n stack = []\n\n for index in range(len(s)):\n if index == 0:\n # this is the initialization stage \n stack.append(s[index])\n continue\n \n if len(stack) != 0:\n if s[index] == ')' and stack[-1] == '(':\n # 
means a match, pop\n stack.pop()\n \n elif s[index] == '}' and stack[-1] == '{':\n stack.pop()\n elif s[index] == ']' and stack[-1] == '[':\n stack.pop()\n else:\n stack.append(s[index])\n else:\n stack.append(s[index])\n\n return len(stack) == 0\n ","repo_name":"allan7yin/DataStructuresAndAlgorithms","sub_path":"Stack/ValidParentheses.py","file_name":"ValidParentheses.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"17091992153","text":"from SCRIBES.SignalConnectionManager import SignalManager\n\nclass Placer(SignalManager):\n\n\tdef __init__(self, manager, editor):\n\t\tSignalManager.__init__(self, editor)\n\t\tself.__init_attributes(manager, editor)\n\t\tself.connect(manager, \"destroy\", self.__destroy_cb)\n\t\tself.connect(manager, \"activate\", self.__activate_cb)\n\t\tself.connect(manager, \"inserted-text\", self.__text_cb)\n\n\tdef __init_attributes(self, manager, editor):\n\t\tself.__manager = manager\n\t\tself.__editor = editor\n\t\tself.__cursor_line_offset = 0\n\t\tself.__has_selection = False\n\t\treturn\n\n\tdef __place(self):\n\t\tif self.__has_selection: return False\n\t\tstart, end = self.__editor.line_bounds\n\t\tstart_offset, end_offset = start.get_line_offset(), end.get_line_offset()\n\t\tcursor_offset = self.__cursor_line_offset\n\t\tif cursor_offset < start_offset: cursor_offset = start_offset\n\t\tif cursor_offset > end_offset: cursor_offset = end_offset\n\t\titerator = self.__editor.cursor.copy()\n\t\titerator.set_line_offset(cursor_offset)\n\t\tself.__editor.textbuffer.place_cursor(iterator)\n\t\tself.__editor.grab_focus()\n\t\tfrom gobject import idle_add\n\t\tidle_add(self.__manager.emit, \"finished\")\n\t\treturn False\n\n\tdef __text_cb(self, *args):\n\t\tfrom gobject import idle_add\n\t\tidle_add(self.__place)\n\t\treturn False\n\n\tdef __activate_cb(self, *args):\n\t\tself.__has_selection = self.__editor.has_selection\n\t\tself.__cursor_line_offset = self.__editor.cursor.get_line_offset()\n\t\treturn False\n\n\tdef __destroy_cb(self, *args):\n\t\tself.disconnect()\n\t\tdel self\n\t\treturn False\n","repo_name":"mystilleef/scribes","sub_path":"LanguagePlugins/HashComments/CursorPlacer.py","file_name":"CursorPlacer.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2274853392","text":"import tensorflow as tf\nimport numpy as py\nimport matplotlib.pyplot as plt\n\nW = tf.Variable(.3, dtype = tf.float32)\nb = tf.Variable(-.3, dtype = tf.float32)\n\nx = tf.placeholder(dtype = tf.float32)\ny = tf.placeholder(dtype = tf.float32)\npy = W * x + b \n\ny = tf.placeholder(dtype = tf.float32)\nloss = tf.reduce_sum(tf.square(py - y))\n\nopt = tf.train.GradientDescentOptimizer(0.01)\ntrain = opt.minimize(loss)\n\nx_data = [1,2,3,4]\ny_data = [0, -1, -2, -3]\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(1000):\n sess.run(train, {x: x_data, y: y_data})\n curr_W,curr_b,curr_loss,curr_py = sess.run([W, b, loss, py], {x: x_data, y: y_data})\n print(\"W: %s b: %s loss: %s\"%(curr_W, curr_b, curr_loss))\n\n plt.plot(x_data, y_data,\"*\",x_data, curr_py,\"--\")\n plt.plot()\n 
plt.show()\n\n","repo_name":"zl810881283/tensorflow-learning","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22182115060","text":"from scipy.io.wavfile import read\nimport os\nfrom sklearn.model_selection import train_test_split\nimport tensorflow.keras as keras\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras import Input, layers\nfrom tensorflow.keras import backend as K\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport sounddevice as sd\nimport pickle as pkl\nimport librosa as librosa\nimport librosa.display\nfrom tensorflow.keras import backend as K\n\ndef preproces_audio(y, n_fft=2048, hop_length=512, sr=48000):\n spectrogram_librosa = np.abs(librosa.stft(\n y, n_fft=n_fft, hop_length=hop_length, win_length=n_fft, window='hann')) ** 2\n spectrogram_librosa_db = librosa.power_to_db(spectrogram_librosa, ref=np.max)\n spectrogram_librosa_db = spectrogram_librosa_db / (-80)\n spectrogram_librosa_db *= 2\n spectrogram_librosa_db -= 1\n return spectrogram_librosa_db\n\ndef create_weighted_binary_crossentropy(zero_weight, one_weight):\n\n def weighted_binary_crossentropy(y_true, y_pred):\n\n # Original binary crossentropy (see losses.py):\n # K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)\n\n # Calculate the binary crossentropy\n b_ce = K.binary_crossentropy(y_true, y_pred)\n\n # Apply the weights\n weight_vector = y_true * one_weight + (1. - y_true) * zero_weight\n weighted_b_ce = weight_vector * b_ce\n\n # Return the mean error\n return K.mean(weighted_b_ce)\n\n return weighted_binary_crossentropy\n\ndata = []\ny = []\nundersample = 1\n\n\nfor f_name in os.listdir('0'):\n data.append(read(os.path.join('0', f_name))[1][::undersample])\n y.append(0)\n\nfor f_name in os.listdir('1'):\n data.append(read(os.path.join('1', f_name))[1][::undersample])\n y.append(1)\n\n#sd.play(data[-2], samplerate=1500)\n#sd.wait()\n\nX = np.array(data)\nX = np.array([preproces_audio(i) for i in X])\nX = np.expand_dims(X, axis=-1)\ny = np.array(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=420)\n\ninput_shape = (1025, 94, 1)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Conv2D(8, (3, 3), activation='relu', input_shape=input_shape))\nmodel.add(keras.layers.AvgPool2D((4, 2), strides=(3, 1)))\nmodel.add(keras.layers.Conv2D(16, (3, 3), activation='relu'))\nmodel.add(keras.layers.AvgPool2D((4, 2), strides=(3, 1)))\nmodel.add(keras.layers.Conv2D(32, (3, 3), activation='relu'))\nmodel.add(keras.layers.AvgPool2D((3, 2), strides=(2, 1)))\nmodel.add(keras.layers.Conv2D(32, (3, 3), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.AvgPool2D((3, 2), strides=(2, 1)))\nmodel.add(keras.layers.Conv2D(16, (3, 3), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.MaxPool2D((3, 3), strides=(3, 3)))\nmodel.add(keras.layers.Conv2D(16, (3, 3), activation='relu'))\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(6, activation='relu'))\nmodel.add(keras.layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(loss=keras.losses.binary_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])\n\nmodel.summary()\n\nhistory = model.fit(X_train, y_train, epochs=20, validation_data=(X_test, y_test),\n batch_size=32, class_weight={0: 0.35, 1: 1})\n\nfor i in 
list(history.history.keys()):\n plt.plot(history.history[i], label=i)\n\nplt.legend(loc='best')\nplt.show()\n\nmodel.save('model_CNN_4.pkl')","repo_name":"HbcOfficial1/minecraft_bot","sub_path":"ANN.py","file_name":"ANN.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"42678190853","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.optim as optim\nfrom speech_loader import SpeechLoader\nimport numpy as np\nfrom model import VGG\nfrom train import train, test\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# 参数设置\nparser = argparse.ArgumentParser(description='Google Speech Commands Recognition')\nparser.add_argument('--train_path', default='data/train', help='path to the train data folder')\nparser.add_argument('--test_path', default='data/test', help='path to the test data folder')\nparser.add_argument('--valid_path', default='data/valid', help='path to the valid data folder')\nparser.add_argument('--batch_size', type=int, default=100, metavar='N', help='training and valid batch size')\nparser.add_argument('--test_batch_size', type=int, default=100, metavar='N', help='batch size for testing')\nparser.add_argument('--arc', default='VGG11', help='network architecture: VGG11, VGG13, VGG16, VGG19')\nparser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train')\nparser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum, for SGD only')\nparser.add_argument('--optimizer', default='adam', help='optimization method: sgd | adam')\nparser.add_argument('--cuda', default=True, help='enable CUDA')\nparser.add_argument('--seed', type=int, default=1234, metavar='S', help='random seed')\n\n# 特征提取参数设置\nparser.add_argument('--window_size', default=.02, help='window size for the stft')\nparser.add_argument('--window_stride', default=.01, help='window stride for the stft')\nparser.add_argument('--window_type', default='hamming', help='window type for the stft')\nparser.add_argument('--normalize', default=True, help='boolean, wheather or not to normalize the spect')\n\nargs = parser.parse_args()\n\n# 确定是否使用CUDA\nargs.cuda = args.cuda and torch.cuda.is_available()\ntorch.manual_seed(args.seed) # PyTorch随机种子设置\nif args.cuda:\n torch.cuda.manual_seed(args.seed) # CUDA随机种子设置\n\n# 加载数据, 训练集,验证集和测试集\ntrain_dataset = SpeechLoader(args.train_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=20, pin_memory=args.cuda, sampler=None)\n\nvalid_dataset = SpeechLoader(args.valid_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\nvalid_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=args.batch_size, shuffle=None,\n num_workers=20, pin_memory=args.cuda, sampler=None)\n\ntest_dataset = SpeechLoader(args.test_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\ntest_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.test_batch_size, shuffle=None,\n num_workers=20, pin_memory=args.cuda, sampler=None)\n\n# 建立网络模型\nmodel = 
VGG(args.arc)\n\nif args.cuda:\n print('Using CUDA with {0} GPUs'.format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model).cuda()\n\n# 定义优化器\nif args.optimizer.lower() == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\nelif args.optimizer.lower() == 'sgd':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\nelse:\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n#import pdb\n#pdb.set_trace()\n# train 和 valid过程\nfor epoch in range(1, args.epochs + 1):\n # 模型在train集上训练\n train(train_loader, model, optimizer, epoch, args.cuda)\n\n # 验证集测试\n test(valid_loader, model, args.cuda, 'valid')\n\n# 测试集验证\ntest(test_loader, model, args.cuda, 'test')\n\n\n","repo_name":"xiaobaoonline/pytorch-in-action","sub_path":"chapter8_PyTorch项目实战/speech_command/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"47"} +{"seq_id":"38381278860","text":"import typing\nfrom typing import Dict\nimport functools\n\nimport PySide2\nimport numpy as np\nimport qimage2ndarray\nfrom PySide2.QtCore import QAbstractTableModel, QModelIndex, Qt\nfrom PySide2.QtGui import QVector3D, QColor, QPixmap\nfrom skimage.color import hsv2rgb\n\nfrom arthropod_describer.common.label_image import PropertyType\nfrom arthropod_describer.common.state import State\nfrom arthropod_describer.common.utils import vector_to_img\n\nPropKey = str\nLabel = int\nPropName = str\nPhotoName = str\n\n\nclass MeasurementsTableModel(QAbstractTableModel):\n\n def __init__(self, state: State, parent: typing.Optional[PySide2.QtCore.QObject] = None):\n super().__init__(parent)\n\n self.column_names: typing.List[str] = []\n self.prop_tuple_list: typing.List[typing.Tuple[Label, PropKey, PropName]] = []\n self.state: State = state\n self.single_photo_mode: bool = True\n self.header_labels: typing.List[str] = []\n self.labels: typing.List[int] = []\n self.displayed_property_key: str = ''\n self._display_intensity_in_color: bool = True # color value will either be displayed as QColor(True) or numbers(False)\n\n def rowCount(self, parent:PySide2.QtCore.QModelIndex=QModelIndex()) -> int:\n if self.state.storage is None:\n return 0\n return self.state.storage.image_count\n pass\n\n def columnCount(self, parent:PySide2.QtCore.QModelIndex=QModelIndex()) -> int:\n if self.state.storage is None:\n return 0\n return len(self.column_names)\n\n def data(self, index:PySide2.QtCore.QModelIndex, role:int=Qt.DisplayRole) -> typing.Any:\n if self.state.storage is None:\n return None\n photo = self.state.storage.get_photo_by_idx(index.row(), load_image=False)\n label = self.prop_tuple_list[index.column()][0]\n props = photo['Labels'].get_region_props(label)\n\n if role == Qt.UserRole:\n return label\n elif role == Qt.UserRole + 1:\n return self.prop_tuple_list[index.column()][1]\n elif role == Qt.UserRole + 2:\n return self.prop_tuple_list[index.column()][2]\n elif role == Qt.UserRole + 3:\n return photo.image_name\n\n if props is None:\n if role == Qt.DisplayRole:\n return 'N/A'\n return None\n #if self.displayed_property_key not in props:\n # return None\n prop_key = self.prop_tuple_list[index.column()][1]\n if prop_key not in props:\n if role == Qt.DisplayRole:\n return 'N/A'\n return None\n prop = props[prop_key]\n if role == Qt.DisplayRole:\n if prop.prop_type in {PropertyType.Scalar, PropertyType.String}:\n return str(prop.value)\n if (prop.prop_type == 
PropertyType.Intensity or prop.prop_type == PropertyType.IntensityHSV) and not self._display_intensity_in_color:\n str_val = ''\n vals = prop.value[0] if prop.num_vals > 1 else [prop.value]\n for i, val in enumerate(vals):\n if type(val) == float:\n str_val = str_val + f', {val:.3f} {prop.val_names[i]}'\n else:\n str_val = str_val + f', {val} {prop.val_names[i]}'\n return str_val.strip(',')\n # if prop.prop_type == PropertyType.Vector:\n # return prop.format_value()\n if prop.prop_type == PropertyType.NDArray:\n val: np.ndarray = prop.value[0]\n return f'{val.shape[1:]} matrices for {\",\".join(prop.val_names)}'\n return None\n if role == Qt.BackgroundRole:\n if self._display_intensity_in_color:\n if prop.prop_type == PropertyType.Intensity:\n if prop.num_vals == 1:\n return QColor.fromRgbF(*(prop.value / 255.0, ) * 3)\n elif prop.num_vals == 3:\n clr = [val / 255.0 for val in prop.value[0]]\n return QColor.fromRgbF(*clr)\n elif prop.prop_type == PropertyType.IntensityHSV:\n val = prop.value[0]\n arr = np.array([[val[0] / 360.0, val[1] / 100.0, val[2] / 100.0]])\n clr = hsv2rgb(arr).tolist()[0]\n return QColor.fromRgbF(*clr)\n if role == Qt.DecorationRole:\n if prop.prop_type == PropertyType.Vector:\n if prop.vector_viz is None:\n viz = vector_to_img(prop.value[0], (128, 24))\n prop.vector_viz = QPixmap.fromImage(qimage2ndarray.array2qimage(viz))\n return prop.vector_viz\n if role == Qt.UserRole + 4:\n return prop\n return None\n\n def headerData(self, section:int, orientation:PySide2.QtCore.Qt.Orientation, role:int=Qt.DisplayRole) -> typing.Any:\n if self.state.storage is None:\n return None\n if role == Qt.DisplayRole:\n if orientation == Qt.Horizontal:\n return self.column_names[section]\n if orientation == Qt.Vertical:\n return self.state.storage.image_names[section]\n return None\n\n def display_property(self, prop_key: str):\n self.displayed_property_key = prop_key\n lab_hierarchy = self.state.label_hierarchy\n header_labels = set()\n labels = set()\n for i in range(self.state.storage.image_count):\n photo = self.state.storage.get_photo_by_idx(i, load_image=False)\n #header_labels = header_labels.union({self.state.colormap.label_names[prop.label] for prop in photo['Labels'].prop_list})\n labels = labels.union({prop.label for prop in photo['Labels'].prop_list})\n self.labels = list(labels)\n self.labels.sort()\n # TODO REMOVE\n #self.header_labels = [self.state.colormap.label_names[label] for label in self.labels]\n self.header_labels = [lab_hierarchy.nodes[label].name for label in self.labels]\n start = self.index(0, 0)\n end = self.index(self.rowCount()-1, self.columnCount() - 1)\n self.dataChanged.emit(start, end)\n\n def display_intensity_in_color(self, in_color: bool = True):\n self._display_intensity_in_color = in_color\n start = self.index(0, 0)\n end = self.index(self.rowCount()-1, self.columnCount() - 1)\n self.dataChanged.emit(start, end)\n\n def update_model(self):\n prop_tuple_set: typing.Set[typing.Tuple[Label, PropKey, PropName]] = set()\n for i in range(self.state.storage.image_count):\n photo = self.state.storage.get_photo_by_idx(i, load_image=False)\n for prop in photo['Labels'].prop_list:\n #prop_tuple = (prop.info.key, prop.label, prop.info.name)\n prop_tuple = (prop.label, prop.info.key, prop.info.name)\n prop_tuple_set.add(prop_tuple)\n self.prop_tuple_list = list(sorted(prop_tuple_set, key=lambda tup: tup[:2])) # same count as column count\n #self.column_names = [f'{tup[2]}:{self.state.colormap.label_names[tup[1]]}' for tup in self.prop_tuple_list]\n 
self.column_names.clear()\n        #cur_prop_key = ''\n        # TODO replace hardcoded `Labels`\n        hierarchy = self.state.storage.get_label_hierarchy2('Labels')\n        cur_label = -1\n        for label, key, name in self.prop_tuple_list:\n            #if cur_prop_key != key:\n            #if cur_label != label:\n            #col_name = f'{name}:{self.state.colormap.label_names[label]}'\n            # TODO REMOVE\n            #col_name = f'{self.state.colormap.label_names[label]}:{name}'\n            col_name = f'{hierarchy.nodes[label].name}:{name}'\n            #cur_label = label\n            #cur_prop_key = key\n            #else:\n            #col_name = f':{self.state.colormap.label_names[label]}'\n            # col_name = f':{name}'\n            self.column_names.append(col_name)\n        start = self.index(0, 0)\n        end = self.index(self.rowCount()-1, self.columnCount() - 1)\n        self.dataChanged.emit(start, end)\n\n\n","repo_name":"mrazr/maphis","sub_path":"arthropod_describer/measurements_viewer/measurements_model.py","file_name":"measurements_model.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43762564825","text":"# This is a sample Python script.\nimport time\n\nimport pygame, sys\nfrom pygame.locals import *\n\n# Press Shift+F10 to run it, or replace it with your own code.\n# Double-press Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\npygame.init()\n# create a Surface object\nscreen = pygame.display.set_mode((400, 300))\npygame.display.set_caption('hello')\n\nwhite = (255,255,255)\nGREEN = (0,255,255)\nBLUE = (0,0,255)\n\nfontObj = pygame.font.SysFont(\"华文中宋\",32)\ntextSurfaceObj = fontObj.render('hello world', True ,GREEN,BLUE) # text color, background color\ntexeRectObj = textSurfaceObj.get_rect()\ntexeRectObj.center = (200,150)\nsoundObj = pygame.mixer.Sound('badswap.wav')\n\n\nwhile True:\n    screen.fill(white)\n    screen.blit(textSurfaceObj,texeRectObj)\n    soundObj.play()\n    time.sleep(2)\n    soundObj.stop()\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            exit()\n    pygame.display.update()\n","repo_name":"Smaug175/Pygame_learning","sub_path":"makinggames/2.20.py","file_name":"2.20.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42216425304","text":"from experiments.experiment import Experiment\nfrom functools import partial\n\n# from models.baselines.poke_encoder_fc import PokeAE\nfrom models.conv_poke_encoder import ConvPokeAE\nfrom data.datamodule import StaticDataModule\n\n\nclass PokeEncoderModel(Experiment):\n\n\n    def __init__(self,config,dirs,devices):\n        super().__init__(config,dirs,devices)\n\n        # initialize models\n        self.datakeys = [\"poke\",\"flow\",\"images\",\"original_flow\"]\n\n\n\n        self.config[\"architecture\"].update({\"in_size\": self.config[\"data\"][\"spatial_size\"][0]})\n\n        model = ConvPokeAE\n\n        if self.config[\"general\"][\"restart\"]:\n            ckpt_path = self._get_checkpoint()\n            self.ae = model.load_from_checkpoint(ckpt_path,map_location=\"cpu\",config=self.config)\n        else:\n            self.ae = model(self.config)\n        # basic trainer is initialized in parent class\n        # self.logger.info(\n        #     f\"Number of trainable parameters in model is {sum(p.numel() for p in self.ae.parameters())}\"\n        # )\n\n        self.ckpt_callback = self.ckpt_callback(filename='{epoch}-{lpips-val:.3f}',monitor='lpips-val',\n                                                save_top_k=self.config[\"logging\"][\"n_saved_ckpt\"], mode='min')\n        to_yaml_cb = self.add_ckpt_file()\n\n        callbacks = [self.ckpt_callback,to_yaml_cb]\n        if self.config[\"general\"][\"restart\"] and ckpt_path is not None:\n            self.basic_trainer = 
partial(self.basic_trainer,resume_from_checkpoint=ckpt_path,callbacks=callbacks)\n else:\n self.basic_trainer = partial(self.basic_trainer,callbacks=callbacks)\n\n\n\n\n def train(self):\n # prepare data\n datamod = StaticDataModule(self.config[\"data\"],datakeys=self.datakeys)\n datamod.setup()\n n_batches_complete_train = len(datamod.train_dataloader())\n n_batches_complete_val = len(datamod.val_dataloader())\n n_train_batches = self.config[\"training\"][\"max_batches_per_epoch\"] if n_batches_complete_train > self.config[\"training\"][\"max_batches_per_epoch\"] else n_batches_complete_train\n n_val_batches = self.config[\"training\"][\"max_val_batches\"] if n_batches_complete_val > self.config[\"training\"][\"max_val_batches\"] else n_batches_complete_val\n\n if not self.is_debug:\n trainer = self.basic_trainer(limit_train_batches=n_train_batches, limit_val_batches=n_val_batches, limit_test_batches=n_val_batches)\n else:\n trainer = self.basic_trainer()\n\n trainer.fit(self.ae,datamodule=datamod)\n\n\n\n\n\n def test(self):\n pass\n","repo_name":"CompVis/ipoke","sub_path":"experiments/poke_encoder.py","file_name":"poke_encoder.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"47"} +{"seq_id":"26145501879","text":"from Interfaces import Set\nfrom DLList import DLList\nimport numpy as np\n\nclass ChainedHashTable(Set):\n class Node() :\n def __init__(self, key, value) :\n self.key = key\n self.value = value\n\n def __init__(self, dtype=DLList) :\n self.dtype = dtype\n self.d = 1\n self.t = self.alloc_table(2**self.d)\n self.z = 193759204821\n self.w = 31\n self.n = 0\n\n def alloc_table(self, n: int):\n t = np.zeros(n, dtype=np.object)\n for i in range(n):\n t[i] = self.dtype()\n return t\n\n\n def _hash(self, key : int) -> int :\n return self.z * hash(key) % (2**self.w) >> (self.w - self.d) \n\n def size(self) -> int:\n return self.n\n \n def find(self, key : object) -> object :\n # todo\n h = self._hash(key)\n for i in range(self.t[h].size()):\n if self.t[h].get(i).key == key:\n return self.t[h].get(i).value\n return None\n\n \n def add(self, key : object, value : object) :\n # todo\n if self.find(key) != None:\n return False\n if len(self.t) == self.n:\n self.resize()\n hash_value = self._hash(key)\n self.t[hash_value].append(self.Node(key, value))\n self.n += 1\n return True\n\n\n\n def remove(self, key : int) -> object:\n # todo\n if self.find(key) == None:\n return None\n else:\n hash_value = self._hash(key)\n list = self.t[hash_value]\n temp = None\n for i in range(len(list)):\n if list[i].key == key:\n self.n -= 1\n temp = list.remove(i)\n if len(self.t) > 3*self.n:\n self.resize()\n return temp\n\n \n def resize(self):\n # todo\n if self.n == len(self.t):\n self.d += 1\n if len(self.t) >= 3*self.n:\n self.d -= 1\n temp = self.alloc_table(2**self.d)\n for i in range(len(self.t)):\n for j in range(len(self.t[i])):\n current_ele = self.t[i].get(j)\n h = self._hash(current_ele.key)\n temp[h].append(current_ele)\n self.t = temp\n\n #len(self.t[h]) is not what you want\n #self.t[h].size() is what you want\n #self.t[h][j] is not what you want\n #self.t[h].get(j) is what you want\n\n\n def __str__(self):\n s = \"\\n\"\n for i in range(len(self.t)):\n s += str(i) + \" : \"\n for j in range(len(self.t[i])):\n k = self.t[i].get(j) # jth node at ith list\n s += \"(\" + str(k.key) + \", \" + str(k.value) + \"); \"\n\n s += \"\\n\"\n return 
s\n\n\n\n\n","repo_name":"TKD10/Bookstore","sub_path":"template/template/ChainedHashTable.py","file_name":"ChainedHashTable.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43747113383","text":"'''\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\nimport tensorflow as tf\n\na = tf.constant(2)\nb = tf.constant(3)\n\nwith tf.Session() as sess:\n print(\"a=2, b=3\")\n print(\"상수의 합: %i\" % sess.run(a+b))\n print(\"상수의 곱: %i\" % sess.run(a*b))\n print()\n''' result\na=2, b=3\n상수의 합: 5\n상수의 곱: 6\n'''\n\na = tf.placeholder(tf.int16)\nb = tf.placeholder(tf.int16)\n\nadd = tf.add(a, b)\nmul = tf.multiply(a, b)\n\nwith tf.Session() as sess:\n print(\"변수의 합: %i\" % sess.run(add, feed_dict={a: 2, b: 3}))\n print(\"변수의 곱: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}))\n print()\n'''result\n변수의 합: 5\n변수의 곱: 6\n'''\n\nmatrix1 = tf.constant([[3., 3.]])\nmatrix2 = tf.constant([[2.], [2.]])\nproduct = tf.matmul(matrix1, matrix2)\n\nwith tf.Session() as sess:\n result = sess.run(product)\n print(result)\n'''result\n[[12.]]\n'''","repo_name":"tjwodud04/Books-and-other-stuffs","sub_path":"책, 튜토리얼(Following books, tutorials)/Tutorials/Tensorflow Tutorial/basic_operations.py","file_name":"basic_operations.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11535073480","text":"import numpy as np\nfrom numpy import linalg as LA\ndt = 0.1\nA = np.array([[1, dt, 0.5*dt**2], [0, 1, dt], [0, 0, 1]])\nB = np.array([1/6*dt**3, 0.5*dt**2, dt]).reshape(3, 1)\nR = 1.0\nweight = 4\ntx=1\nQ = weight * np.array([[1, tx, 0], [tx, tx*tx, 0], [0, 0, 0]])\nP = Q.copy()\n\ndef get_tx(x):\n return 1-x*0.01*2\n\nfor i in range(10000):\n # tx = get_tx(i*dt)\n # Q = weight * np.array([[1, tx, 0], [tx, tx*tx, 0], [0, 0, 0]])\n M = np.matmul(np.matmul(A.transpose(), P),B)\n L = (R + np.matmul(np.matmul(B.transpose(), P),B))\n N = np.linalg.inv(L)\n P = np.matmul(np.matmul(A.transpose(),P), A) + Q - np.matmul(np.matmul(M,N),M.transpose())\n\n K = - np.matmul(N, M.transpose()) \n S = A + np.matmul(B, K)-np.identity(np.shape(A)[0])\n\n w, _ = LA.eig(S)\n for item in w:\n if (item.real>=0):\n print (\" i:\",i*dt,\" s:\",w)\n\n# # Computed value function V = x^T P x\n# print(\"==P==\\n\",P)\n\n# # print dynamic gain K\n# print(\"==K==\\n\",K)\n\n# validate stability\nprint(\"==s==\\n\",S)\n\nw, _ = LA.eig(S)\nprint(\"==w==\\n\",w)","repo_name":"yanfang7722/debug_tools","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14782188680","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n'''\nAuthor: sunlei\nEmail: sunlei@cmcm.com\nLast modified: 2018-01-09 11:29:11\n'''\n\nimport visdom\nimport torch\nimport numpy as np\nfrom PIL import Image\n\nclass Visualizer():\n\n def __init__(self, port=8097):\n self.vis = visdom.Visdom(port=port)\n self.idx = 0\n self.data = {}\n\n def gen_idx(self):\n self.idx += 1\n return self.idx\n\n def convert(self, image):\n '''Convert image into numpy array'''\n if isinstance(image, list):\n return [self.convert(x) for x in image]\n elif isinstance(image, np.ndarray):\n return image.transpose([2,0,1])\n elif isinstance(image, torch.ByteTensor):\n return 
image.numpy().copy().transpose([2,0,1])\n elif isinstance(image, Image.Image):\n return np.array(image).transpose([2,0,1])\n else:\n raise TypeError('{} type not supported'.format(type(image)))\n\n def concat(self, images):\n '''Concat a list of different sizes images together'''\n height = 0\n width = 0\n for x in images:\n height = max(height, x.shape[1])\n width = max(width, x.shape[2])\n ret = np.ones([len(images), 3, height, width], dtype=np.uint8) * 255\n for i, x in enumerate(images):\n _, h, w = x.shape\n ret[i,:,:h,:w] = x\n return ret\n\n def image(self, im, title, idx=None):\n if idx is None:\n idx = self.gen_idx()\n im = self.convert(im)\n if isinstance(im, list):\n im = self.concat(im)\n nrow = im.shape[0]\n width, height = nrow * im.shape[3], im.shape[2]\n self.vis.images(im, nrow=nrow, opts=dict(title=title,\n padding=10,\n width=width,\n height=height), win=idx)\n else:\n width, height = im.shape[2], im.shape[1]\n self.vis.image(im, opts=dict(title=title, width=width,\n height=height), win=idx)\n return idx\n\n def line(self, x, y, legend, title='line', xlabel='x', ylabel='y', idx=None):\n if idx is None:\n idx = self.gen_idx()\n if idx not in self.data:\n self.data[idx] = {'X': [], 'Y': []}\n\n if isinstance(y, list) and len(y) == 1:\n y = y[0]\n self.data[idx]['X'].append(x)\n self.data[idx]['Y'].append(y)\n self.vis.line(\n X=np.array(self.data[idx]['X']),\n Y=np.array(self.data[idx]['Y']),\n opts={\n 'title': title,\n 'legend': legend,\n 'xlabel': xlabel,\n 'ylabel': ylabel\n },\n win=idx\n )\n return idx\n\n\nvis = Visualizer()\n\n\n\n\n\n\n\n\n\n","repo_name":"allyLei/deepvision","sub_path":"libs/visualizer/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73323109903","text":"from collections import Counter\nfrom collections import defaultdict\nfrom edgetpu.basic.basic_engine import BasicEngine\nimport numpy as np\nfrom PIL import Image\n\n\nclass EmbeddingEngine(BasicEngine):\n \"\"\"Engine used to obtain embeddings from headless mobilenets.\"\"\"\n\n def __init__(self, model_path):\n \"\"\"Creates a EmbeddingEngine with given model and labels.\n\n Args:\n model_path: String, path to TF-Lite Flatbuffer file.\n\n Raises:\n ValueError: An error occurred when model output is invalid.\n \"\"\"\n BasicEngine.__init__(self, model_path)\n output_tensors_sizes = self.get_all_output_tensors_sizes()\n if output_tensors_sizes.size != 1:\n raise ValueError(\n ('Dectection model should have only 1 output tensor!'\n 'This model has {}.'.format(output_tensors_sizes.size)))\n\n def DetectWithImage(self, img):\n \"\"\"Calculates embedding from an image.\n\n Args:\n img: PIL image object.\n\n Returns:\n Embedding vector as np.float32\n\n Raises:\n RuntimeError: when model's input tensor format is invalid.\n \"\"\"\n input_tensor_shape = self.get_input_tensor_shape()\n if (input_tensor_shape.size != 4 or input_tensor_shape[3] != 3 or\n input_tensor_shape[0] != 1):\n raise RuntimeError(\n 'Invalid input tensor shape! 
Expected: [1, height, width, 3]')\n    required_image_size = (input_tensor_shape[2], input_tensor_shape[1])\n    with img.resize(required_image_size, Image.NEAREST) as resized_img:\n      input_tensor = np.asarray(resized_img).flatten()\n      return self.RunInference(input_tensor)[1]\n\n\nclass KNNEmbeddingEngine(EmbeddingEngine):\n  \"\"\"Extends embedding engine to also provide kNearest Neighbor detection.\n\n  This class maintains an in-memory store of embeddings and provides\n  functions to find k nearest neighbors against a query embedding.\n  \"\"\"\n\n  def __init__(self, model_path, kNN=3):\n    \"\"\"Creates a EmbeddingEngine with given model and labels.\n\n    Args:\n      model_path: String, path to TF-Lite Flatbuffer file.\n\n    Raises:\n      ValueError: An error occurred when model output is invalid.\n    \"\"\"\n    EmbeddingEngine.__init__(self, model_path)\n    self.clear()\n    self._kNN = kNN\n\n  def clear(self):\n    \"\"\"Clear the store: forgets all stored embeddings.\"\"\"\n    self._labels = []\n    self._embedding_map = defaultdict(list)\n    self._embeddings = None\n\n  def addEmbedding(self, emb, label):\n    \"\"\"Add an embedding vector to the store.\"\"\"\n\n    normal = emb/np.sqrt((emb**2).sum()) # Normalize the vector\n\n    self._embedding_map[label].append(normal) # Add to store, under \"label\"\n\n    # Expand labelled blocks of embeddings for when we have less than kNN\n    # examples. Otherwise blocks that have more examples unfairly win.\n    emb_blocks = []\n    self._labels = [] # We'll be reconstructing the list of labels\n    for label, embeds in self._embedding_map.items():\n      emb_block = np.stack(embeds)\n      if emb_block.shape[0] < self._kNN:\n        emb_block = np.pad(emb_block,\n                           [(0,self._kNN - emb_block.shape[0]), (0,0)],\n                           mode=\"reflect\")\n      emb_blocks.append(emb_block)\n      self._labels.extend([label]*emb_block.shape[0])\n\n    self._embeddings = np.concatenate(emb_blocks, axis=0)\n\n  def kNNEmbedding(self, query_emb):\n    \"\"\"Returns the self._kNN nearest neighbors to a query embedding.\"\"\"\n\n    # If we have nothing stored, the answer is None\n    if self._embeddings is None: return None\n\n    # Normalize query embedding\n    query_emb = query_emb/np.sqrt((query_emb**2).sum())\n\n
    # We want a cosine distance from query to each stored embedding. A matrix\n    # multiplication can do this in one step, resulting in a vector of\n    # distances.\n    dists = np.matmul(self._embeddings, query_emb)\n\n    # If we have less than self._kNN distances we can only return that many.\n    kNN = min(len(dists), self._kNN)\n\n    # Get the N largest cosine similarities (larger means closer).\n    n_argmax = np.argpartition(dists, -kNN)[-kNN:]\n\n    # Get the corresponding labels associated with each distance.\n    labels = [self._labels[i] for i in n_argmax]\n\n    # Return the most common label over all self._kNN nearest neighbors.\n    most_common_label = Counter(labels).most_common(1)[0][0]\n    return most_common_label\n\n  def exampleCount(self):\n    \"\"\"Just returns the size of the embedding store.\"\"\"\n    return sum(len(v) for v in self._embedding_map.values())\n\n\n","repo_name":"google-coral/project-teachable","sub_path":"embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"21531258887","text":"import setuptools\n\n\nwith open('README.md', 'r') as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name='pygiftparserrgmf',\n    version='0.0.5',\n    author='Román Martínez',\n    author_email='rgmf@riseup.net',\n    install_requires=['ply'],\n    description='Moodle GIFT files parser',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url='https://github.com/rgmf/pygiftparser',\n    packages=setuptools.find_packages(),\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'License :: OSI Approved :: GNU Affero General Public License v3',\n        'Operating System :: POSIX :: Linux'\n    ],\n    python_requires='>=3.8',\n)\n","repo_name":"rgmf/pygiftparser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"3771061129","text":"import re\nimport sys\nimport argparse\n\ndef argparser():\n    p = argparse.ArgumentParser()\n\n    # Required parameters\n    p.add_argument('--corpus', default=None, type=str, required=True)\n\n    config = p.parse_args()\n    return config\n\ndef cleaning(text):\n    text = re.sub(r'[\\U00010000-\\U0010ffff][\\u20000000-\\u2fffffff][\\U0001f000-\\U0001ffff]', '', text) # Clean emoji\n    text = re.sub(r'<.*?>', '', text) # Clean HTML tag\n    text = re.sub(r'http\\S+', '', text) # url -> token\n    text = re.sub(r'[\\w._-]+[@]\\w+[.]\\w+', '', text) # email -> token\n    text = re.sub(r'\\d+[-.]\\d{3,4}[-.]\\d{3,4}', '', text) # phone number -> token\n    text = re.sub(r'[!]{2,}', '!', text) # multiple !s -> !\n    text = re.sub(r'[?]{2,}', '?', text) # multiple ?s -> ?\n    text = re.sub(r'[-=+,#:^$@*\\\"※~&%ㆍ』┘\\\\‘|\\(\\)\\[\\]\\`\\'…》]','', text) # Clean special symbols\n\n    return text\n\nif __name__=='__main__':\n    config = argparser()\n    \n    with open(config.corpus, 'r', encoding='utf-8', errors='ignore') as reader:\n        for li, line in enumerate(reader):\n            _line = line.split('\\t')\n            label, text = _line[0], ' '.join(_line[1:])\n            \n            # Cleaning\n            text = cleaning(text)\n\n            if len(text) > 0:\n                line = '{}\\t{}'.format(label, re.sub(r'\\n', ' ', text))\n                sys.stdout.write(line+'\\n')","repo_name":"lyeoni/nlp-tutorial","sub_path":"news-category-classifcation/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":1355,"dataset":"github-code","pt":"47"} +{"seq_id":"22475042698","text":"import 
time\r\n\r\nfrom player import HumanPlayer, RandomComputerPlayer, GeniusComputer\r\n\r\n\r\nclass TicTacToe:\r\n def __init__(self):\r\n self.board = [' ' for _ in range(9)]\r\n self.current_winner = None\r\n\r\n def print_board(self):\r\n for row in [self.board[i * 3:(i + 1) * 3] for i in range(3)]:\r\n print('| ' + ' | '.join(row) + ' |')\r\n\r\n @staticmethod\r\n def print_board_num():\r\n number_board = [[str(i) for i in range(j * 3, (j + 1) * 3)] for j in range(3)]\r\n for row in number_board:\r\n print('| ' + ' | '.join(row) + ' |')\r\n\r\n def available_moves(self):\r\n return [i for i, spot in enumerate(self.board) if spot == ' ']\r\n\r\n def empty_spots(self):\r\n return ' ' in self.board\r\n\r\n def num_empty_spots(self):\r\n return self.board.count(' ')\r\n\r\n def make_move(self, spot, letter):\r\n if self.board[spot] == ' ':\r\n self.board[spot] = letter\r\n if self.winner(spot, letter):\r\n self.current_winner = letter\r\n return True\r\n return False\r\n\r\n def winner(self, spot, letter):\r\n # checking for row\r\n row_index = spot // 3\r\n row = self.board[row_index * 3: (row_index + 1) * 3]\r\n if all([spot == letter for spot in row]):\r\n return True\r\n\r\n # checking for column\r\n col_index = spot % 3\r\n col = [self.board[col_index + i * 3] for i in range(3)]\r\n if all([spot == letter for spot in col]):\r\n return True\r\n\r\n # checking for diagonal\r\n if spot % 2 == 0: # (0,2,4,6,8)\r\n diagonal1 = [self.board[i] for i in [0, 4, 8]]\r\n if all([spot == letter for spot in diagonal1]):\r\n return True\r\n diagonal2 = [self.board[i] for i in [2, 4, 6]]\r\n if all([spot == letter for spot in diagonal2]):\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef play(game, x_player, o_player, print_board=True):\r\n if print_board:\r\n game.print_board_num()\r\n\r\n letter = 'X'\r\n while game.empty_spots():\r\n if letter == 'O':\r\n spot = o_player.get_moves(game)\r\n else:\r\n spot = x_player.get_moves(game)\r\n\r\n if game.make_move(spot, letter):\r\n if print_board:\r\n print(f'{letter} made a move to spot {spot}')\r\n game.print_board()\r\n print('')\r\n if game.current_winner:\r\n if print_board:\r\n print(letter + ' wins!')\r\n return letter\r\n\r\n letter = 'O' if letter == 'X' else 'X'\r\n time.sleep(0.8)\r\n\r\n if print_board:\r\n print('It is a tie!')\r\n\r\n\r\nif __name__ == '__main__':\r\n x_player = HumanPlayer('X')\r\n o_player = GeniusComputer('O')\r\n t = TicTacToe()\r\n play(t, x_player, o_player, print_board=True)\r\n","repo_name":"Musawir-ap/tic-tac-toe","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"72384303501","text":"import numpy\nimport logging\nfrom sandbox.util.Sampling import Sampling\nfrom sandbox.predictors.LibSVM import LibSVM\n\nclass SVMLeafRank(LibSVM):\n \"\"\"\n This is a subclass of LibSVM which will do model selection before learning. 
\n \"\"\"\n def __init__(self, paramDict, folds, sampleSize=None, numProcesses=1):\n \"\"\"\n sampleSize is the number of randomly chosen examples to use for model \n selection \n \"\"\"\n super(SVMLeafRank, self).__init__()\n self.paramDict = paramDict\n self.folds = folds \n self.chunkSize = 2\n self.setMetricMethod(\"auc2\") \n self.sampleSize = sampleSize \n self.processes = numProcesses\n \n def generateLearner(self, X, y):\n \"\"\"\n Train using the given examples and labels, and use model selection to\n find the best parameters.\n \"\"\"\n if numpy.unique(y).shape[0] != 2:\n print(y)\n raise ValueError(\"Can only operate on binary data\")\n\n #Do model selection first \n if self.sampleSize == None: \n idx = Sampling.crossValidation(self.folds, X.shape[0])\n learner, meanErrors = self.parallelModelSelect(X, y, idx, self.paramDict)\n else: \n idx = Sampling.crossValidation(self.folds, self.sampleSize)\n inds = numpy.random.permutation(X.shape[0])[0:self.sampleSize]\n learner, meanErrors = self.parallelModelSelect(X[inds, :], y[inds], idx, self.paramDict)\n learner = self.getBestLearner(meanErrors, self.paramDict, X, y)\n \n return learner\n\n def getBestLearner(self, meanErrors, paramDict, X, y, idx=None):\n \"\"\"\n As we are using AUC we will look for the max value. \n \"\"\"\n return super(SVMLeafRank, self).getBestLearner(meanErrors, paramDict, X, y, idx, best=\"max\")\n \n def copy(self): \n \"\"\"\n Return a new copied version of this object. \n \"\"\"\n svm = SVMLeafRank(self.paramDict, self.folds, self.sampleSize)\n svm.setKernel(self.kernel,self.kernelParam)\n svm.setC(self.C)\n svm.setErrorCost(self.errorCost)\n svm.setPenalty(self.penalty)\n svm.setSvmType(self.type) \n svm.processes=self.processes\n svm.epsilon=self.epsilon\n svm.metricMethod = self.metricMethod\n svm.chunkSize = self.chunkSize\n svm.timeout = self.timeout\n svm.normModelSelect = svm.normModelSelect \n \n return svm \n","repo_name":"charanpald/sandbox","sub_path":"sandbox/ranking/leafrank/SVMLeafRank.py","file_name":"SVMLeafRank.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"74572457102","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom scipy.ndimage.filters import convolve\nfrom scipy.stats import linregress\n\nim = Image.open('silver/large.jpg')\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [1/3, 1/3, 1/3])\n\n\ndef reni_entropy(p, q):\n return (1 / (1 - q) * np.log(np.sum(np.power(p, q)))) if q != 1 else (-np.sum(p * np.log(p)))\n\nimg = rgb2gray(np.array(im))\nq = np.array(range(-2, 10))\nws = range(1, 20)\nns =[]\nfor w in ws:\n ns.append(reni_entropy(convolve(img, np.ones((w, w)), mode='constant')[::w, ::w] / np.mean(img), 10))\n\nx = -np.log(ws)\ny = ns\n\nsns.regplot(x=pd.Series(x, name='log of window size (log(ϵ))'),\n y=pd.Series(y, name='N(q, ϵ)'))\n\nlinregress(x, y).slope\ndef get_reni_dim(img, q):\n ws = range(1, 20)\n ns = []\n\n for w in ws:\n conv = convolve(img, np.ones((w, w)), mode='constant')[::w, ::w]\n ns.append(reni_entropy(conv / np.sum(conv), q))\n\n x = -np.log(ws)\n y = ns\n\n return linregress(x, y).slope\ndef get_reni_spectre(img, qs):\n return list(map(lambda x: get_reni_dim(img, x), qs))\nspec = get_reni_spectre(img, q)\nplt.plot(q, spec)\nplt.show()\n\nws = range(1, 20)\nns =[]\nfor w in ws:\n ns.append(reni_entropy(convolve(img, np.ones((w, w)), mode='constant')[::w, ::w] / 
np.mean(img), 1))\nx = -np.log(ws)\ny = ns\nlinregress(x, y).slope\nreni_entropy(convolve(img, np.ones((w, w)), mode='constant')[::w, ::w] / np.mean(img), 1)","repo_name":"Ilyalya/fractalka","sub_path":"laba3.py","file_name":"laba3.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33793260656","text":"\"\"\"\nPix2Pix file for US fetal brain with segmentation monitor\n\"\"\"\nimport os\nimport argparse\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nfrom keras.utils.vis_utils import plot_model\nimport matplotlib.pyplot as plt\n\nfrom gan_utils import *\nfrom makedir import *\n# from image_utils import normalization\n\ndef load_image_seg(sample_path):\n\t\"\"\"\n\tLoad and split the data (cam,US,seg)\n\n\tParameters\n\t----------\n\tsample_path : string\n\t\timage's path\n\n\tReturns\n\t-------\n\tinput_image : tensorflow tensor\n\t\tinput image, i.e. CAM \n\n\treal_image : tensorflow tensor\n\t\treal image, i.e. US image\n\n\tseg_mask : tensorflow tensor\n\t\tsegmentation mask, i.e. ellipses\n\t\"\"\"\n\t\n\traw_image = tf.io.read_file(sample_path)\n\timage = tf.image.decode_png(raw_image, channels=3)\n\n\tw = tf.shape(image)[0]\n\tw = w // 3\n\tinput_image = image[:w, :, :]\n\treal_image = image[w:2*w, :, :]\n\tseg_mask = image[2*w:, :, :]\n\n\n\tinput_image = tf.cast(input_image, tf.float32)\n\treal_image = tf.cast(real_image, tf.float32)\n\tseg_mask = tf.cast(seg_mask, tf.float32)\n\n\treturn input_image, real_image, seg_mask\n\ndef aspect_ratio(main_path):\n\t\"\"\"\n\tCompute the aspect ratio of each training sample\n\n\tParameters\n\t----------\n\tmain_path : string\n\t\tpath of training folder\n\n\tReturns\n\t-------\n\taspect_ratio_list : list\n\t\taspect ratio of each sample\n\n\t\"\"\"\n\tsample_paths = [main_path + '/' + i for i in os.listdir(main_path)]\n\taspect_ratio_list = []\n\tfor path in sample_paths:\n\t\tinput_image, real_image, seg_mask = load_image_seg(path)\n\t\tratio = input_image.shape[0] / input_image.shape[1]\n\t\tprint(input_image.shape, ratio)\n\t\taspect_ratio_list.append(ratio)\n\t\n\treturn np.array(aspect_ratio_list)\n\t\n@tf.function()\ndef random_jitter(input_image, real_image, seg_mask):\n\t\"\"\"\n\tComplete image preprocessing for GAN\n\n\tParameters\n\t----------\n\tinput_image : tensorflow tensor\n\t\tinput image, i.e. CAM \n\n\treal_image : tensorflow tensor\n\t\treal image, i.e. US image\n\n\tseg_mask : tensorflow tensor\n\t\tsegmentation mask\n\n\tReturns\n\t-------\n\tinput_image : tensorflow tensor\n\t\tcropped CAM \n\n\treal_image : tensorflow tensor\n\t\tcropped US image\n\n\tseg_mask : tensorflow tensor\n\t\tcropped segmentation mask\n\t\"\"\"\n\t# Resizing to 206x286\n\tinput_image, real_image, seg_mask = resize_seg(input_image, real_image, seg_mask, 206, 286)\n\n\t# Random cropping back to IMG_HEIGHT x IMG_WIDTH\n\tinput_image, real_image, seg_mask = random_crop_seg(input_image, real_image, seg_mask, IMG_HEIGHT, IMG_WIDTH)\n\n\tif tf.random.uniform(()) > 0.5:\n\t\t# Random mirroring\n\t\tinput_image = tf.image.flip_left_right(input_image)\n\t\treal_image = tf.image.flip_left_right(real_image)\n\t\tseg_mask = tf.image.flip_left_right(seg_mask)\n\n\treturn input_image, real_image, seg_mask\n\ndef load_image_train(sample_path):\n\t\"\"\"\n\tLoad and preprocess a training file\n\n\tParameters\n\t----------\n\tsample_path : string\n\t\timage's path\n\n\tReturns\n\t-------\n\tinput_image : tensorflow tensor\n\t\tpreprocessed CAM \n\n\treal_image : tensorflow tensor\n\t\tpreprocessed US image\n\n\tseg_mask : tensorflow tensor\n\t\tpreprocessed segmentation mask\n\t\"\"\"\n\tinput_image, real_image, seg_mask = load_image_seg(sample_path)\n\tinput_image, real_image, seg_mask = random_jitter(input_image, real_image, seg_mask)\n\tinput_image, real_image, seg_mask = normalize_seg(input_image, real_image, seg_mask)\n\t# input_image, real_image, seg_mask = padding_seg(input_image, real_image, seg_mask)\n\n\treturn input_image, real_image, seg_mask\n\ndef load_image_test(image_file):\n\t\"\"\"\n\tLoad and preprocess a test file\n\n\tParameters\n\t----------\n\timage_file : string\n\t\timage's path\n\n\tReturns\n\t-------\n\tinput_image : tensorflow tensor\n\t\tpreprocessed CAM \n\n\treal_image : tensorflow tensor\n\t\tpreprocessed US image\n\n\tseg_mask : tensorflow tensor\n\t\tpreprocessed segmentation mask\n\t\"\"\"\n\tinput_image, real_image, seg_mask = load_image_seg(image_file)\n\tinput_image, real_image, seg_mask = resize_seg(input_image, real_image, seg_mask,\n\t\t\t\t\t\t\t\t\tIMG_HEIGHT, IMG_WIDTH)\n\tinput_image, real_image, seg_mask = normalize_seg(input_image, real_image, seg_mask)\n\t# input_image, real_image, seg_mask = padding_seg(input_image, real_image, seg_mask)\n\n\treturn input_image, real_image, seg_mask\n\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='Pix2Pix GAN with segmentation control on the shape')\n\tparser.add_argument(\"attribute\", type=str, help=\"Attribute for the classification task: 'Plane' or 'Brain_plane'\")\n\tparser.add_argument(\"clas\", type=str, help=\"Class for the classification task, e.g. 'Fetal brain' or 'Trans-cerebellum'\")\n\tparser.add_argument(\"-name_folder\", default='trial', help='Name of sub-folder to save variables')\n\tparser.add_argument(\"-epochs\", default=200, type=int, help=\"Number of epochs\")\n\tparser.add_argument(\"-seg_epcs\", default=0, type=int, help=\"start of segmentation loss\")\n\tparser.add_argument(\"-transitional_epcs\", default=0, type=int, help=\"start of conditional training\")\n\tparser.add_argument(\"-lambda_gan\", default=100, type=int, help=\"lambda, weight of L1\")\n\tparser.add_argument(\"-mu_seg\", default=100, type=int, help=\"mu, weight of L_seg\")\n\tparser.add_argument(\"-distance\", default='dice', type=str, help=\"distance, distance to compute the L_seg: 'dice' or 'hausdorff' \")\n\targs = parser.parse_args()\n\n\t# IMAGES PATH\n\ttrain_dict = {'Trans-cerebellum':'train_cut_seg', 'Trans-thalamic':'train_cut_seg', 'Trans-ventricular':'train_cut_seg'}\n\ttest_dict = {'Trans-cerebellum':'test_cut_seg', 'Trans-thalamic':'test_cut_seg', 'Trans-ventricular':'test_cut_seg'}\n\n\tmain_path_train = 'GAN/'+ args.attribute + '/' + args.clas + 
'/' + train_dict[args.clas]\n\tmain_path_test = 'GAN/'+ args.attribute + '/' + args.clas + '/' + test_dict[args.clas]\n\n\t# input_image, real_image = load_image(main_path_train + '/sample_8.png')\n\t\n\t## PREPROCESSING\n\tBUFFER_SIZE = len(os.listdir(main_path_train)) # The facade training set consist of 400 images\n\tBATCH_SIZE = 1 # The batch size of 1 produced better results for the U-Net in the original pix2pix experiment\n\tIMG_WIDTH = 256 # Each image is 256x256 in size\n\tIMG_HEIGHT = 176\n\t\n\t## ATTEMPS TO MAKE THE RIGHT FORM OF LOADER\n\t# input_image, real_image, seg_mask = load_image_train(main_path_train + '/sample_3.png')\n\t# input_image, real_image, seg_mask = input_image.numpy(), real_image.numpy(), seg_mask.numpy() \n\t# aspect_ratio_list = aspect_ratio(main_path_train) aspet ratio\n\n\t# MAKE tf.Dataset\n\ttrain_dataset = tf.data.Dataset.list_files(main_path_train + '/*.png')\t\n\ttrain_dataset = train_dataset.map(load_image_train,\n \t num_parallel_calls=tf.data.AUTOTUNE)\n\ttrain_dataset = train_dataset.shuffle(BUFFER_SIZE)\n\ttrain_dataset = train_dataset.batch(BATCH_SIZE)\n\n\t## VISUAL CHECK\n\t# for (cam, image, mask) in iter(train_dataset.take(1)):\n\t# \tcam, image, mask = cam.numpy(), image.numpy(), mask.numpy()\n\n\t# \tfig, ax = plt.subplots(nrows=1, ncols=3, figsize=(14,4))\n\t# \tax[0].imshow((cam[0,:,:,:]+1.)/2.)\n\t# \tax[1].imshow((image[0,:,:,:]+1.)/2., cmap='gray')\n\t# \tax[2].imshow(mask[0,:,:], cmap='gray')\n\t# \tplt.show()\n\t\t\t\n\ttest_dataset = tf.data.Dataset.list_files(main_path_test + '/*.png')\n\ttest_dataset = test_dataset.map(load_image_test,)\n\ttest_dataset = test_dataset.batch(BATCH_SIZE)\n\n\t## VISUAL CHECK\n\t# for (cam, image, mask) in iter(test_dataset.take(1)):\n\t# \tprint(cam.shape, image.shape, mask.shape)\n\t# \tcam, image, mask = cam.numpy(), image.numpy(), mask.numpy()\n\n\t# \tfig, ax = plt.subplots(nrows=1, ncols=3, figsize=(14,4))\n\t# \tax[0].imshow((cam[0,:,:,:] + 1.) / 2.)\n\t# \tax[1].imshow((image[0,:,:,:]+1.) 
/2., cmap='gray')\n\t# \tax[2].imshow(mask[0,:,:], cmap='gray')\n\t# \tplt.show()\n\n\t## MODEL\n\tOUTPUT_CHANNELS = 3\n\tLAMBDA = 100\n\t\n\t# TEST GENERATOR MODEL\n\t# generator = Generator(OUTPUT_CHANNELS, dim=(IMG_HEIGHT , IMG_WIDTH))\n\t# print(generator.summary())\n\tvgg16_generator = vgg16_unet(input_shape=(IMG_WIDTH,IMG_WIDTH,3), weight='Images_classification_Brain_plane/models/VGG_16_/train_16/checkpoint/weights/35.hdf5', trainable=True)\n\tprint(vgg16_generator.summary())\n\tgenerator = vgg16_generator\n\t# plot_model(generator, show_shapes=True, show_layer_names=True)\n\t# plt.figure()\n\t# plt.imshow(input_image)\n\t# plt.show()\n\n\t# gen_output = generator(input_image[tf.newaxis, ...], training=False)\n\t# plt.figure()\n\t# plt.imshow(normalization(gen_output[0, ...],0,1))\n\t\n\t## TEST DISCRIMINATOR MODEL\n\tdiscriminator = Discriminator(input_shape=(IMG_HEIGHT,IMG_WIDTH,3))\n\tprint(discriminator.summary())\n\t# # disc_out = discriminator([input_image[tf.newaxis, ...], gen_output], training=False)\n\t# # plt.figure()\n\t# # plt.imshow(disc_out[0, ..., -1], cmap='RdBu_r')\n\t# # plt.colorbar()\n\t# # plt.show()\n\n\t## LOAD SEGMENTATION MODEL\n\tsegmenter = tf.keras.models.load_model('Segmentation/weights_seg_paper/40.hdf5', compile=False)\n\tprint(segmenter.summary())\n\n\t## TRAINING\n\tEPOCHS = args.epochs * BUFFER_SIZE\n\tTIME_EPOCHS = BUFFER_SIZE\n\tCKP_EPOCHS = args.epochs * BUFFER_SIZE // 8\n\tTRANSITIONAL_STEP = args.transitional_epcs * BUFFER_SIZE # if transitional_step = EPOCHS the training is unconditional\n\tLOSSES_STEP = args.seg_epcs * BUFFER_SIZE \n\tlambda_gan = args.lambda_gan\n\tmu_seg = args.mu_seg\n\tdistance = args.distance\n\n\tgenerator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n\tdiscriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n\n\tsave_dict = 'GAN/'+ args.attribute + '/' + args.clas + '/' + args.name_folder\n\n\tcheckpoint_dir = save_dict + '/training_checkpoints'\n\tcheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n\n\tcheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n\t\t\t\t\t\t\t\t\tdiscriminator_optimizer=discriminator_optimizer,\n\t\t\t\t\t\t\t\t\tgenerator=generator,\n\t\t\t\t\t\t\t\t\tdiscriminator=discriminator)\n\t\n\treal_time_path = save_dict + '/GAN_real_time'\n\tlosses_path = save_dict + '/losses'\n\tsmart_makedir(real_time_path)\n\tsmart_makedir(losses_path)\n\n\tfit_seg(train_dataset, test_dataset, steps= EPOCHS, \n\t\tgenerator=generator, discriminator=discriminator,\n\t\tgenerator_optimizer=generator_optimizer, \n\t\tdiscriminator_optimizer=discriminator_optimizer,\n\t\tsegmenter = segmenter,\n\t\tcheckpoint = checkpoint,\n\t\tname = f'gen_image_step',\n\t\tsave_path_real_time = real_time_path,\n\t\tsave_path_losses= losses_path,\n\t\tcheckpoint_prefix = checkpoint_prefix,\n\t\ttime_steps = TIME_EPOCHS,\n\t\ttransitional_step = TRANSITIONAL_STEP,\n\t\tseg_step = LOSSES_STEP, \n\t\tsave_loss_steps = TIME_EPOCHS,\n\t\tcheckpoint_steps = CKP_EPOCHS,\n\t\tlambda_gan=lambda_gan, \n\t\tmu_seg=mu_seg,\n\t\tseg_distance=distance)\n\n\twith open(save_dict +'/summary.txt', 'w', encoding='utf-8') as file:\n\t\tfile.write(f'EPOCHS: {EPOCHS} \\n ')\n\t\tfile.write(f'TIME_EPOCHS: {TIME_EPOCHS} \\n ')\n\t\tfile.write(f'CKP_EPOCHS: {CKP_EPOCHS} \\n ')\n\t\tfile.write(f'TRANSITIONAL_STEP: {TRANSITIONAL_STEP} \\n ')\n\t\tfile.write(f'LOSSES_STEP: {LOSSES_STEP} \\n ')\n\t\tfile.write(f'lambda_gan: {lambda_gan} \\n ')\n\t\tfile.write(f'mu_gan: {mu_seg} \\n 
')\n\t\tfile.write(f'distance: {distance} \\n ')\n\t\t\n","repo_name":"AngeloLasala/US_fetal_classification","sub_path":"US_fetal_classification/pix2pix_seg.py","file_name":"pix2pix_seg.py","file_ext":"py","file_size_in_byte":10539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8430679048","text":"from comet_ml import Experiment\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets\nfrom torch.utils.data import DataLoader\nimport torchvision as tv\nimport torchvision.transforms as tr\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as opt\nimport argparse\nfrom tqdm import tqdm\nimport time\nimport json\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom typing import List, Tuple, Optional, Dict, NamedTuple, Union, Callable\nimport itertools\nimport string\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n\n\n\ndef compute_precisions(\n predictions: torch.Tensor,\n targets: torch.Tensor,\n src_lengths: Optional[torch.Tensor] = None,\n minsep: int = 6,\n maxsep: Optional[int] = None,\n name: Optional[str] = None,\n count: Optional[str] = None,\n slen: Optional[int] = None,\n override_length: Optional[int] = None, # for casp\n):\n if isinstance(predictions, np.ndarray):\n predictions = torch.from_numpy(predictions)\n if isinstance(targets, np.ndarray):\n targets = torch.from_numpy(targets)\n if predictions.dim() == 2:\n predictions = predictions.unsqueeze(0)\n if targets.dim() == 2:\n targets = targets.unsqueeze(0)\n override_length = (targets[0, 0] >= 0).sum()\n\n # Check sizes\n if predictions.size() != targets.size():\n raise ValueError(\n f\"Size mismatch. Received predictions of size {predictions.size()}, \"\n f\"targets of size {targets.size()}\"\n )\n device = predictions.device\n \n # Elements for plot\n x, y = np.nonzero(targets.squeeze().cpu().numpy()) #extract ones\n c = np.full_like(x.astype(str), 'tab:gray')\n a = np.full_like(x.astype(float), 0.5)\n \n batch_size, seqlen, _ = predictions.size()\n seqlen_range = torch.arange(seqlen, device=device)\n\n sep = seqlen_range.unsqueeze(0) - seqlen_range.unsqueeze(1)\n sep = sep.unsqueeze(0)\n valid_mask = sep >= minsep\n valid_mask = valid_mask & (targets >= 0) # negative targets are invalid\n\n if maxsep is not None:\n valid_mask &= sep < maxsep\n\n if src_lengths is not None:\n valid = seqlen_range.unsqueeze(0) < src_lengths.unsqueeze(1)\n valid_mask &= valid.unsqueeze(1) & valid.unsqueeze(2)\n else:\n tmp = seqlen if int(slen) == 256 else int(slen)\n src_lengths = torch.full([batch_size], tmp, device=device, dtype=torch.long)\n\n predictions = predictions.masked_fill(~valid_mask, float(\"-inf\"))\n\n x_ind, y_ind = np.triu_indices(seqlen, minsep)\n predictions_upper = predictions[:, x_ind, y_ind]\n targets_upper = targets[:, x_ind, y_ind]\n\n topk = seqlen if int(slen) == 256 else int(slen)\n indices = predictions_upper.argsort(dim=-1, descending=True)[:, :topk]\n \n # Elements for plot\n l = indices[0, :int(topk/5)].cpu()\n al = np.append(a, np.ones(l.size(0)))\n a = np.append(a, np.ones(indices.size(1)))\n xl = np.append(x, x_ind[l])\n x = np.append(x, x_ind[indices.cpu()])\n yl = np.append(y, y_ind[l])\n y = np.append(y, y_ind[indices.cpu()])\n cl = np.append(c, targets_upper[0, l].cpu().numpy().astype(str))\n c = np.append(c, targets_upper[0, indices.cpu()].cpu().numpy().astype(str))\n c[(c == '1.0') | (c == '1')] = 'tab:blue'\n c[(c == '0.0') | (c == '0')] = 'tab:red'\n cl[(cl == '1.0') 
| (cl == '1')] = 'tab:blue'\n cl[(cl == '0.0') | (cl == '0')] = 'tab:red'\n \n f1 = plt.figure()\n f2 = plt.figure()\n \n ax1 = f1.add_subplot()\n ax1.scatter(x, y, s=5, c=c, alpha=a)\n ax1.grid(True, which='both')\n ax1.set_box_aspect(1)\n f1.savefig('img/' + str(count) + '_' + str(name) + '_L.png')\n \n ax2 = f2.add_subplot()\n ax2.scatter(xl, yl, s=5, c=cl, alpha=al)\n ax2.grid(True, which='both')\n ax2.set_box_aspect(1)\n f2.savefig('img/' + str(count) + '_' + str(name) + '_L5.png')\n \n plt.show()\n \n topk_targets = targets_upper[torch.arange(batch_size).unsqueeze(1), indices]\n if topk_targets.size(1) < topk:\n topk_targets = F.pad(topk_targets, [0, topk - topk_targets.size(1)])\n\n cumulative_dist = topk_targets.type_as(predictions).cumsum(-1)\n\n gather_lengths = src_lengths.unsqueeze(1)\n\n gather_indices = (\n torch.arange(0.1, 1.1, 0.1, device=device).unsqueeze(0) * gather_lengths\n ).type(torch.long) - 1\n\n binned_cumulative_dist = cumulative_dist.gather(1, gather_indices)\n binned_precisions = binned_cumulative_dist / (gather_indices + 1).type_as(\n binned_cumulative_dist\n )\n\n pl5 = binned_precisions[:, 1].mean()\n pl2 = binned_precisions[:, 4].mean()\n pl = binned_precisions[:, 9].mean()\n\n return {\"L\": pl, \"L/2\": pl2, \"L/5\": pl5}\n\n\ndef precision(\n predictions: torch.Tensor,\n targets: torch.Tensor,\n slen: Optional[int] = None,\n count: Optional[str] = None,\n) -> Dict[str, float]:\n if isinstance(targets, np.ndarray):\n targets = torch.from_numpy(targets)\n contact_ranges = [\n #(\"local\", 3, 6),\n #(\"short\", 6, 12),\n (\"MLR\", 12, None),\n (\"LR\", 24, None)\n ]\n metrics = {}\n targets = targets.to(predictions.device)\n for name, minsep, maxsep in contact_ranges:\n rangemetrics = compute_precisions(\n predictions,\n targets,\n minsep=minsep,\n maxsep=maxsep,\n name=name, #name of contact\n count=count, #name of protein\n slen=slen #sequence lenght\n )\n for key, val in rangemetrics.items():\n metrics[f\"{name}_{key}\"] = val.item()\n return metrics\n\n","repo_name":"LisaUnifi/CNN_protein","sub_path":"precision.py","file_name":"precision.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71383907663","text":"# 1. You are given a 2D array of size mxn. Rotate the array by 90 degrees clockwise. 
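A clockwise rotation maps element (i, j) of an m x n input to element (j, m-1-i) of the n x m result. 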
Assume n >= 1, m>=1.\r\n\r\n \r\n\r\n# Example 1:\r\n# | 1 2 |\r\n# | 3 4 |\r\n\r\n \r\n\r\n# | 3 1 |\r\n# | 4 2 | \r\n\r\n \r\n\r\n# Example 2: \r\n# | 1 2 3 4 5 |\r\n# | 6 7 8 9 10 | \r\n# | 11 12 13 14 15 | \r\n\r\n \r\n\r\n# | 11 6 1 |\r\n# | 12 7 2 |\r\n# | 13 8 3 |\r\n# | 14 9 4 |\r\n# | 15 10 5 | \r\n\r\ndef matrix(mat):\r\n rows = len(mat)\r\n cols = len(mat[0])\r\n # build a fresh cols x rows result; aliasing the input (out_mat = mat) would overwrite values still needed\r\n out_mat = [[None] * rows for _ in range(cols)]\r\n for i in range(rows):\r\n for j in range(cols):\r\n out_mat[j][rows - 1 - i] = mat[i][j]\r\n\r\n return out_mat\r\n\r\nprint(matrix([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]))\r\n \r\n\r\n\r\n\r\n ","repo_name":"bjflamm/AirlineRoutes","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8172969055","text":"mxd = arcpy.mapping.MapDocument(\"CURRENT\")\nmxd.author = \"James Jones\"\nmxd.save()\ndf = arcpy.mapping.ListDataFrames(mxd, \"Layers\")[0]\nlyrFile = arcpy.mapping.Layer(r\"C:\\Users\\James Jones\\Documents\\ArcGIS\\NHL_Teams.lyr\")\narcpy.mapping.AddLayer(df, lyrFile)\narcpy.mapping.ExportToPDF(mxd, r'C:\\Users\\James Jones\\Documents\\ArcGIS\\NHL_Teams.pdf')\nlyr = arcpy.mapping.ListLayers(mxd)[0]\nlyr.name = \"NHL Teams\"\nlyr.visible = False\narcpy.RefreshTOC()\narcpy.RefreshActiveView()\nlyr.visible = True\narcpy.RefreshTOC()\narcpy.RefreshActiveView()\nlyrExtent = lyr.getSelectedExtent()\ndf.extent = lyrExtent\narcpy.mapping.ExportToPDF(mxd, r'C:\\Users\\James Jones\\Documents\\ArcGIS\\Hockey_Teams.pdf')\nPDFdoc = arcpy.mapping.PDFDocumentCreate(r'C:\\Users\\James Jones\\Documents\\ArcGIS\\Hockey_Overview.pdf')\nPDFdoc.appendPages(r'C:\\Users\\James Jones\\Documents\\ArcGIS\\NHL_Teams.pdf')\nPDFdoc.appendPages(r'C:\\Users\\James Jones\\Documents\\ArcGIS\\Hockey_Teams.pdf')\nPDFdoc.saveAndClose()\n\n","repo_name":"jmjones0784/Assignment4","sub_path":"GettingStarted.py","file_name":"GettingStarted.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33476217190","text":"\nfrom collections import namedtuple\nUser = namedtuple(\"User\",[\"name\", \"age\", \"weight\"])\nuser = User(\"admin\", \"20\", \"60\")\nname, age, weight = user\nprint(user[0])\nprint(name, age, weight)\nprint(user.name, user.age, user.weight)\nprint(type(User))\n\n# _make() builds a new namedtuple instance directly from any sequence\nuser1 = [\"root\",32,60]\nuser1 = User._make(user1)\nprint(user1)\n\n# _asdict() returns the fields as a dict\nuser = User(\"admin\", 20, 60)\nprint(user._asdict()) \n\nfrom collections import ChainMap\n\nuser1 = {\"name\":\"admin\", \"age\":\"20\"}\nuser2 = {\"name\":\"root\", \"weight\": 65}\nusers = ChainMap(user1, user2)\nprint(users.maps)\n\nusers.maps[0][\"name\"] = \"tiger\"\nprint(users.maps)\nprint(user1)\n\nfor key, value in users.items():\n print(key, value)\n\nfrom collections import deque\nq = deque([1, 2, 3])\nq.append('4')\nq.appendleft('0')\nprint(q)\nq.popleft()\nq.pop()\nprint(q)\n\nfrom collections import Counter\n\nanimals = [\"cat\", \"dog\", \"cat\", \"bird\", \"horse\", \"tiger\", \"horse\", \"cat\"]\nanimals_counter = Counter(animals)\nprint(animals_counter)\nprint(animals_counter.most_common(1))\nprint(animals_counter)\n\n\nfrom collections import OrderedDict\n\nuser = OrderedDict()\nuser[\"name\"] = \"admin\"\nuser[\"age\"] = 23\nuser[\"weight\"] = 65\nprint(user)\n
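# OrderedDict remembers key insertion order; move_to_end re-positions an existing key in place\n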
user.move_to_end(\"name\") # move the element to the end\nprint(user)\nuser.move_to_end(\"name\", last = False) # move the element to the front\nprint(user)\nprint(user.keys())\nprint(type(user.values()))\n\n'''\ndefaultdict is a subclass of the built-in dict class. It returns a default value\nwhen a key is missing; in every other respect it behaves exactly like a plain dict.\n'''\nfrom collections import defaultdict\n\ndefault_dict = defaultdict(int)\ndefault_dict[\"x\"] = 10\nprint(default_dict[\"x\"])\nprint(default_dict[\"y\"])\nprint(default_dict[\"z\"])\n\n\ndef getUserInfo():\n return {\n \"name\" : \"\",\n \"age\" : 0\n }\n#print(type(getUserInfo()))\n\ndefault_dict = defaultdict(getUserInfo)\nadmin = default_dict[\"admin\"]\nprint(admin)\nadmin[\"age\"] = 34\nprint(admin)\n\n# expected output:\n# {'name': '', 'age': 0}\n# {'name': '', 'age': 34}","repo_name":"wushensi/Coder","sub_path":"module_collections.py","file_name":"module_collections.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26623089944","text":"#write\ndef write(jadi):\n \n f = open(\"kepo.txt\", \"w\")\n \n f.write(jadi)\n \n f.close()\n \n#read\ndef main():\n \n f = open(\"kepo.txt\", \"r\")\n if f.mode == \"r\":\n contents = f.read()\n print(contents)\n \nx = str(input(\"Nama: \"))\ny = str(input(\"Umur: \"))\nz = str(input(\"Alamat: \"))\nq = str(input(\"Email: \"))\no = str(input(\"Dosen Wali: \"))\njadi = (\"{}\\n{}\\n{}\\n{}\\n{}\\n\".format(x,y,z,q,o))\n\nprint(\"\\n\")\nprint(\"Loading. . . \")\nwrite(jadi)\nprint(\"\")\nmain()\n","repo_name":"kevinputratjahjono/Algoritma","sub_path":"Laporan 9/Biodata.py","file_name":"Biodata.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7561801052","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\n# Values found in school field that are actually areas\nSCHOOLS_THAT_ARE_ACTUALLY_AREAS = ['innovationrca', 'helenhamlyn', 'rectorate']\n\n\ndef populate_new_staff_taxonomy_fields(apps, schema_editor):\n Programme = apps.get_model('taxonomy.Programme')\n School = apps.get_model('taxonomy.School')\n Area = apps.get_model('taxonomy.Area')\n StaffPageRole = apps.get_model('rca.StaffPageRole')\n StaffPage = apps.get_model('rca.StaffPage')\n\n for staff_page_role in StaffPageRole.objects.all().iterator():\n update_fields = []\n\n # Remap some schools to areas\n if staff_page_role.school in SCHOOLS_THAT_ARE_ACTUALLY_AREAS:\n staff_page_role.area = staff_page_role.school\n staff_page_role.school = ''\n\n if staff_page_role.school:\n staff_page_role.school_new = School.objects.get(slug=staff_page_role.school)\n update_fields.append('school_new')\n\n if staff_page_role.programme:\n staff_page_role.programme_new = Programme.objects.get(slug=staff_page_role.programme)\n update_fields.append('programme_new')\n\n if staff_page_role.area:\n staff_page_role.area_new = Area.objects.get(slug=staff_page_role.area)\n update_fields.append('area_new')\n\n if update_fields:\n staff_page_role.save(update_fields=update_fields)\n\n for staff_page in StaffPage.objects.all().iterator():\n update_fields = []\n\n # NOTE: The school field actually points to areas\n if staff_page.school:\n staff_page.area = Area.objects.get(slug=staff_page.school)\n staff_page.save(update_fields=['area'])\n\n\ndef do_nothing(apps, schema_editor):\n pass # Allows us to reverse this migration\n\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rca', '0025_auto_20160701_1415'),\n ]\n\n operations = 
[\n migrations.RunPython(populate_new_staff_taxonomy_fields, do_nothing),\n ]\n","repo_name":"torchbox/verdant-rca","sub_path":"django-verdant/rca/migrations/0026_auto_20160701_1417.py","file_name":"0026_auto_20160701_1417.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"39753862265","text":"import numpy as np\nimport jax.numpy as jnp\nimport cmath\n\n\n### Theta Calculation\ndef thetaCalc(spins, weights, hidBias):\n\ttheta = hidBias + jnp.dot(weights,spins)\n\t'''theta = np.array([complex(0.,0.) for i in range(len(hidBias))])\n\t\t\t\tfor i in range(len(hidBias)):\n\t\t\t\t\tfor j in range(len(spins)):\n\t\t\t\t\t\ttheta[i] += weights[i,j]*spins[j]\n\t\t\t'''\n\treturn theta\n\n\n### Local Energy Calculation\ndef LocalEnergy(spins, weights, visBias, hidBias):\n\n\tELoc = np.array([complex(0.,0.) for i in range(len(spins))])\n\tshiftedSpins = np.array([spins[(i+1)%5] for i in range(5)])\n\tweights2 = weights.T\n\n\t### 1st ELoc contribution\n\tELocJ = jnp.multiply(spins,shiftedSpins)\n\tELocJ = complex(1.,0.)\n\t#print(jnp.sum(ELocJ))\n\n\ttheta = thetaCalc(spins,weights,hidBias)\n\tpreFact = jnp.exp(-2*jnp.multiply(visBias,spins))\n\t#preFact = 1.\n\t#print(preFact)\n\tfor i in range(len(spins)):\n\t\tmultArray = jnp.divide(jnp.cosh(theta - 2 * weights2[i]*spins[i]),jnp.cosh(theta))\n\t\t### 2nd & 3rd ELoc contributions\n\t\t#print(\"Here\")\n\t\t#print(preFact[i]*jnp.prod(multArray))\n\t\tELoc[i] += preFact[i]*jnp.prod(multArray)\n\t\t#print(jnp.prod(multArray))\n\t\t#ELoc[i] += 1j*(-1)**((1+spins[i])/2)*jnp.prod(multArray) * preFact[i]\n\t#print(ELoc)\n\t#print(multArray)\n\tELoc = jnp.sum(ELoc) + ELocJ\n\t#print(ELoc)\n\treturn ELoc\n\ndef LocalEnergy_np(spins, weights, visBias, hidBias):\n\t#ELoc = np.array([complex(0.,0.) for i in range(len(spins))])\n\tweights2 = weights.T\n\tELocJ = complex(0.,0.)\n\ttheta = thetaCalc(spins, weights, hidBias)\n\n\tpreFactReal = np.array([complex(0.,0.) for i in range(5)])\n\tpreFactImag = np.array([complex(0.,0.) for i in range(5)])\n\tmultArrayReal = np.array([complex(1.,0.) for i in range(5)])\n\tmultArrayImag = np.array([complex(0.,1.) for i in range(5)])\n\n\tmultProdReal = complex(1.,0.)\n\tmultProdImag = complex(0.,1.)\n\n\tfor i in range(len(spins)):\n\t\tELocJ += spins[i]*spins[(i+1)%5]\n\t\tpreFactReal[i] = np.exp(-2*visBias[i].real*spins[i])\n\t\tpreFactImag[i] = np.exp(-2*visBias[i].imag*spins[i])\n\t\tfor j in range(len(hidBias)):\n\t\t\tnumerReal = np.cosh(theta[i].real - weights2[i][j].real*spins[i])\n\t\t\tdenomReal = np.cosh(theta[i].real)\n\t\t\tmultArrayReal[i] *= numerReal/denomReal\n\n\t\t\tnumerImag = np.cosh(theta[i].imag - weights2[i][j].imag*spins[i])\n\t\t\tdenomImag = np.cosh(theta[i].imag)\n\t\t\tmultArrayImag[i] *= numerImag/denomImag\n\t\tmultProdReal *= preFactReal[i] * multArrayReal[i]\n\t\tmultProdImag *= preFactImag[i] * multArrayImag[i]*1j\n\n\tELoc = ELocJ + multProdReal + multProdImag\n\treturn ELoc\n\n\ndef O_Deriv_np(spins, weights, visBias, hidBias):\n\tnumHid = len(hidBias)\n\tnumVis = len(visBias)\n\tO_a = spins\n\n\ttheta = thetaCalc(spins, weights, hidBias)\n\tO_b = np.array([complex(0.,0.) for i in range(numHid)])\n\n\tfor i in range(numHid):\n\t\tO_b[i] = np.tanh(theta[i])\n\tO_WReal = np.array([[complex(1.,0.) for i in range(numVis)] for j in range(numHid)])\n\tO_WImag = np.array([[complex(0.,1.) 
for i in range(numVis)] for j in range(numHid)])\n\tfor i in range(numHid):\n\t\tfor j in range(numVis):\n\t\t\tO_WReal[i][j] = O_b[i].real * spins[j]\n\t\t\tO_WImag[i][j] = O_b[i].imag * spins[j]\n\tO_W = O_WReal + O_WImag\n\tO_W = O_W.flatten()\n\n\tStackDev = np.concatenate([O_W,O_a,O_b])\n\tStackDev = StackDev.flatten()\n\treturn StackDev\n\n\ndef O_Deriv(spins, weights, visBias, hidBias):\n\n\tnumHid = len(hidBias)\n\tnumVis = len(visBias)\n\t#print(numVis)\n\t### Calculate derivatives\n\tO_a = spins\n\ttheta = thetaCalc(spins, weights, hidBias)\n\tO_b = jnp.tanh(theta)\n\t#print(O_b)\n\t#print(spins)\n\t'''O_W = np.array([[np.complex(0.,0.) for i in range(numVis)] for j in range(numHid)])\n\tfor i in range(numHid):\n\t\tfor j in range(numVis):\n\t\t\tO_W[i][j] = O_b[i]*np.complex(spins[j],0.)'''\n\n\tO_W = jnp.kron(spins, O_b)\n\t#O_W2 = jnp.kron(O_b,spins)\n\t#O_W = O_W.flatten()\n\t#O_W2 = np.reshape(O_W2,(numHid,numVis))\n\t#print(O_W-O_W2)\n\t#print(np.shape(O_W))\n\tStackDev = np.concatenate([O_W,O_a,O_b])\n\tStackDev = StackDev.flatten()\n\t#print(np.shape(StackDev))\n\n\treturn StackDev\n\n\n","repo_name":"cssmith36/NNJax","sub_path":"StochasticReconfiguration.py","file_name":"StochasticReconfiguration.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41882900427","text":"import kf_book.book_plots as book_plots\nimport numpy as np\nfrom numpy.random import randn\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nimport filterpy.stats as stats\nimport kf_book.kf_internal as kf_internal\nfrom kf_book.kf_internal import DogSimulation\n\nxs = range(500)\nys = randn(500)*1.0 + 10.0\n#plt.plot(xs, ys)\n# plt.show()\n\n####################\n\ngaussian = namedtuple('Gaussian', ['mean', 'var'])\n#gaussian.__repr__ = lambda s: '𝒩(μ={:.3f}, 𝜎²={:.3f})'.format(s[0], s[1])\n\ng1 = gaussian(3.4, 10.1)\ng2 = gaussian(mean=4.5, var=0.2**2)\nprint(g1)\nprint(g2)\n\n#####################\n\n\ndef predict(pos, movement):\n return gaussian(pos.mean + movement.mean, pos.var + movement.var)\n\n\ndef gaussian_multiply(g1, g2):\n mean = (g1.var * g2.mean + g2.var * g1.mean)/(g1.var + g2.var)\n variance = (g1.var*g2.var)/(g1.var + g2.var)\n return gaussian(mean, variance)\n\n\ndef update(prior, likelihood):\n posterior = gaussian_multiply(likelihood, prior)\n return posterior\n\n\npos = gaussian(10.0, 0.5**2)\nmove = gaussian(25.0, 0.7**2)\nestimated_pos = update(pos, move)\nprint(estimated_pos)\n\nxs = np.arange(7, 30, 0.1)\n\nys = [stats.gaussian(x, pos.mean, pos.var) for x in xs]\n#plt.plot(xs, ys, label='$\\mathcal{N}(10,0.04)$')\n\nys = [stats.gaussian(x, move.mean, move.var) for x in xs]\n#plt.plot(xs, ys, label='$\\mathcal{N}(15,0.49)$', ls='--')\n\nys = [stats.gaussian(x, estimated_pos.mean, estimated_pos.var) for x in xs]\n#plt.plot(xs, ys, label='$\\mathcal{N}(25,0.43)$', ls='-.')\n\nplt.legend()\n# plt.show()\n\n######################\n\nnp.random.seed(13)\n\nprocess_var = 2. # variance in the dog's movement\nsensor_var = 4.5 # variance in the sensor\n\nx = gaussian(0., 20.**2) # dog's position, N(0, 20**2)\nvelocity = 1\ndt = 1. 
# time step in seconds\nprocess_model = gaussian(velocity*dt, process_var) # displacement to add to x\nN = 25\n\n# simulate dog and get measurements\ndog = DogSimulation(\n x0=x.mean,\n velocity=process_model.mean,\n measurement_var=sensor_var,\n process_var=process_model.var)\n\n# create list of measurements\nzs = [dog.move_and_sense() for _ in range(N)]\n\nprint('PREDICT\\t\\t\\tUPDATE')\nprint(' x var\\t\\t z\\t x var')\n\n# perform Kalman filter on measurement z\nxs, priors = np.zeros((N, 2)), np.zeros((N, 2))\n\nfor i, z in enumerate(zs):\n prior = predict(x, process_model)\n likelihood = gaussian(z, sensor_var)\n x = update(prior, likelihood)\n\n priors[i] = prior\n xs[i] = x\n kf_internal.print_gh(prior, x, z)\n\nprint()\nprint('final estimate: {:10.3f}'.format(x.mean))\nprint('actual final position: {:10.3f}'.format(dog.x))\nprint(xs)\nprint(priors)\n\n'''\nbook_plots.plot_measurements(zs)\nbook_plots.plot_filter(xs[:, 0], var=priors[:, 1])\nbook_plots.plot_predictions(priors[:, 0])\nbook_plots.show_legend()\nkf_internal.print_variance(xs)\nplt.show()\n'''\n\n#################\n\n\ndef volt(voltage, std):\n return voltage + (randn()*std)\n\ntemp_change = 0\nvoltage_std = 0.13000\nprocess_var = 0.05**2\nactual_voltage = 16.3\n\nx = gaussian(25.0, 1000.0)\nprocess_model = gaussian(0.0, process_var)\n\nN = 50\nzs=[volt(actual_voltage, voltage_std) for i in range(N)]\nps = []\nestimates = []\n\nfor z in zs:\n prior = predict(x, process_model)\n x = update(prior, gaussian(z, voltage_std**2))\n\n #save\n estimates.append(x.mean)\n ps.append(x.var)\n\n#plot\nbook_plots.plot_measurements(zs)\nbook_plots.plot_filter(estimates, var=np.array(ps))\nbook_plots.show_legend()\nplt.ylim(16, 17)\nbook_plots.set_labels(x='step', y='volts')\nplt.show()\n \nplt.plot(ps)\nplt.title('Variance')\nprint('Variance converges to {:.3f}'.format(ps[-1]))\n","repo_name":"tkiethuynh/basickalman","sub_path":"Basics/one_dim_kalman.py","file_name":"one_dim_kalman.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12223237497","text":"from mesa.agent import Agent\nimport numpy as np\nimport requests\nimport math\nimport pandas as pd\nimport json\n\n#Constants\nkmh_to_grdm = (111.32)*60/3.6\ngrd_to_m = (111.32*1000)\n\nclass BikeAgent(Agent):\n def __init__(self,model, unique_id, id_dest, id_orig,route,pos,speed):\n \n super().__init__(unique_id, model)\n self.moving = True\n self.unique_id = unique_id\n self.id_dest = id_dest\n self.id_orig = id_orig\n self.route = route\n self.cnt_route = 0\n self.checkin = False\n self.checkout = False\n self.wait_cnt = 0\n self.pos = pos\n self.duration = 0\n self.distance = 0\n self.speed = speed\n\n def move(self):\n global kmh_to_grdm\n self.model.space.move_agent(self,self.pos, self.speed/kmh_to_grdm)\n\n \n \n \n \n def step(self):\n\n station_orig, station_dest = self.get_station()\n \n if((self.checkin ==False)):\n if(self.model.model_type != 1):\n station_orig, station_dest = self.check_incentive(station_orig,station_dest)\n if((station_orig.dock_bikes > 0)):\n station_orig.dock_bikes -= 1\n station_orig.free_bases += 1\n if((station_orig.priority == 1) & (self.model.model_type != 1)):\n self.model.checkin_incentive +=1\n #Get route\n ini_pos = [station_orig.latitude,station_orig.longitude]\n fin_pos = [station_dest.latitude,station_dest.longitude]\n self.pos = ini_pos\n self.route, self.duration,self.distance = self.get_route(ini_pos,fin_pos)\n self.checkin =True\n 
else:\n dist, station_f = self.get_orig_station_dock_bike(station_orig)\n if(dist 0):\n self.checkout = True\n else:\n dist,station_f = self.get_dest_station_free_base(station_dest)\n if(dist int:\n return len(os.sched_getaffinity(0))\n\n\ndef out_of_date(path: Union[str, Path], *deps: Union[str, Path]) -> bool:\n try:\n mtime = os.stat(path).st_mtime\n except FileNotFoundError:\n return True\n return any(os.stat(dep).st_mtime > mtime for dep in deps)\n\n\ndef _c_isdigit(c: int) -> bool:\n # '0' <= c <= '9'\n return 0x30 <= c <= 0x39\n\n\ndef _c_isalpha(c: int) -> bool:\n # ('A' <= c <= 'Z') or ('a' <= c <= 'z')\n return (0x41 <= c <= 0x5A) or (0x61 <= c <= 0x7A)\n\n\ndef _order(c: int) -> int:\n if _c_isdigit(c):\n return 0\n elif _c_isalpha(c):\n return c\n elif c == 0x7E: # '~'\n return -1\n else:\n return c + 0x100\n\n\ndef verrevcmp(v1: str, v2: str) -> int:\n \"\"\"\n Compare two versions according to the coreutils version sort rules\n (https://www.gnu.org/software/coreutils/manual/html_node/Version_002dsort-ordering-rules.html).\n Returns 0 if v1 == v2 by this definition, < 0 if v1 < v2, and > 0 if v1 >\n v2.\n\n Adapted from\n https://git.savannah.gnu.org/cgit/gnulib.git/tree/lib/filevercmp.c.\n \"\"\"\n # By definition, version sort compares ASCII, not Unicode:\n # https://www.gnu.org/software/coreutils/manual/html_node/Version-sort-ignores-locale.html.\n s1 = bytearray(v1, \"utf-8\")\n s2 = bytearray(v2, \"utf-8\")\n s1_len = len(s1)\n s2_len = len(s2)\n # Add sentinels to avoid some length checks.\n s1.append(0)\n s2.append(0)\n s1_pos = s2_pos = 0\n while s1_pos < s1_len or s2_pos < s2_len:\n while (s1_pos < s1_len and not _c_isdigit(s1[s1_pos])) or (\n s2_pos < s2_len and not _c_isdigit(s2[s2_pos])\n ):\n s1_c = _order(s1[s1_pos]) if s1_pos < s1_len else 0\n s2_c = _order(s2[s2_pos]) if s2_pos < s2_len else 0\n if s1_c != s2_c:\n return s1_c - s2_c\n s1_pos += 1\n s2_pos += 1\n while s1[s1_pos] == 0x30: # '0'\n s1_pos += 1\n while s2[s2_pos] == 0x30: # '0'\n s2_pos += 1\n first_diff = 0\n while _c_isdigit(s1[s1_pos]) and _c_isdigit(s2[s2_pos]):\n if not first_diff:\n first_diff = s1[s1_pos] - s2[s2_pos]\n s1_pos += 1\n s2_pos += 1\n if _c_isdigit(s1[s1_pos]):\n return 1\n if _c_isdigit(s2[s2_pos]):\n return -1\n if first_diff:\n return first_diff\n return 0\n\n\n@total_ordering\nclass KernelVersion:\n \"\"\"\n Version ordered by verrevcmp(), with -rc releases before the final release.\n \"\"\"\n\n def __init__(self, release: str) -> None:\n self._release = release\n # ~ sorts before anything, including the end of the version.\n self._key = re.sub(r\"-(rc[0-9])\", r\"~\\1\", release)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, KernelVersion):\n return NotImplemented\n return self._key == other._key\n\n def __lt__(self, other: object) -> bool:\n if not isinstance(other, KernelVersion):\n return NotImplemented\n return verrevcmp(self._key, other._key) < 0\n\n def __str__(self) -> str:\n return self._release\n\n\nNORMALIZED_MACHINE_NAME = platform.machine()\nif NORMALIZED_MACHINE_NAME.startswith(\"aarch64\") or NORMALIZED_MACHINE_NAME == \"arm64\":\n NORMALIZED_MACHINE_NAME = \"aarch64\"\nelif NORMALIZED_MACHINE_NAME.startswith(\"arm\") or NORMALIZED_MACHINE_NAME == \"sa110\":\n NORMALIZED_MACHINE_NAME = \"arm\"\nelif re.fullmatch(r\"i.86\", NORMALIZED_MACHINE_NAME):\n NORMALIZED_MACHINE_NAME = \"i386\"\nelif NORMALIZED_MACHINE_NAME.startswith(\"ppc64\"):\n NORMALIZED_MACHINE_NAME = \"ppc64\"\nelif NORMALIZED_MACHINE_NAME.startswith(\"ppc\"):\n 
NORMALIZED_MACHINE_NAME = \"ppc\"\nelif NORMALIZED_MACHINE_NAME == \"riscv\":\n NORMALIZED_MACHINE_NAME = \"riscv32\"\nelif re.match(r\"sh[0-9]\", NORMALIZED_MACHINE_NAME):\n NORMALIZED_MACHINE_NAME = \"sh\"\nelif NORMALIZED_MACHINE_NAME == \"sun4u\":\n NORMALIZED_MACHINE_NAME = \"sparc64\"\n\nif NORMALIZED_MACHINE_NAME == \"x86_64\":\n if sys.maxsize > 2**32:\n SYS = {\"bpf\": 321, \"kexec_file_load\": 320, \"rt_sigtimedwait\": 128}\n else: # x32\n SYS = {\"bpf\": 321, \"kexec_file_load\": 320, \"rt_sigtimedwait\": 523}\nelse:\n SYS = {\n \"aarch64\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"alpha\": {\"bpf\": 515, \"rt_sigtimedwait\": 355},\n \"arc\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"arm\": {\n \"bpf\": 386,\n \"kexec_file_load\": 401,\n \"rt_sigtimedwait\": 177,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"csky\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"hexagon\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"i386\": {\"bpf\": 357, \"rt_sigtimedwait\": 177, \"rt_sigtimedwait_time64\": 421},\n \"ia64\": {\"bpf\": 317, \"rt_sigtimedwait\": 159},\n \"loongarch\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"loongarch64\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"m68k\": {\"bpf\": 354, \"rt_sigtimedwait\": 177, \"rt_sigtimedwait_time64\": 421},\n \"microblaze\": {\n \"bpf\": 387,\n \"rt_sigtimedwait\": 177,\n \"rt_sigtimedwait_time64\": 421,\n },\n # TODO: mips is missing here because I don't know how to distinguish\n # between the o32 and n32 ABIs.\n \"mips64\": {\"bpf\": 315, \"rt_sigtimedwait\": 126},\n \"nios2\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"openrisc\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"parisc\": {\n \"bpf\": 341,\n \"kexec_file_load\": 355,\n \"rt_sigtimedwait\": 177,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"parisc64\": {\"bpf\": 341, \"kexec_file_load\": 355, \"rt_sigtimedwait\": 177},\n \"ppc\": {\"bpf\": 361, \"rt_sigtimedwait\": 176, \"rt_sigtimedwait_time64\": 421},\n \"ppc64\": {\"bpf\": 361, \"rt_sigtimedwait\": 176},\n \"riscv32\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"riscv64\": {\n \"bpf\": 280,\n \"kexec_file_load\": 294,\n \"rt_sigtimedwait\": 137,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"s390\": {\n \"bpf\": 351,\n \"kexec_file_load\": 381,\n \"rt_sigtimedwait\": 177,\n \"rt_sigtimedwait_time64\": 421,\n },\n \"s390x\": {\"bpf\": 351, \"kexec_file_load\": 381, \"rt_sigtimedwait\": 177},\n \"sh\": {\"bpf\": 375, \"rt_sigtimedwait\": 177, \"rt_sigtimedwait_time64\": 421},\n \"sparc\": {\"bpf\": 349, \"rt_sigtimedwait\": 105, \"rt_sigtimedwait_time64\": 421},\n \"sparc64\": {\"bpf\": 349, \"rt_sigtimedwait\": 105},\n \"xtensa\": {\"bpf\": 340, \"rt_sigtimedwait\": 229, \"rt_sigtimedwait_time64\": 421},\n }.get(NORMALIZED_MACHINE_NAME, 
{})\n","repo_name":"osandov/drgn","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7837,"program_lang":"python","lang":"en","doc_type":"code","stars":1531,"dataset":"github-code","pt":"47"} +{"seq_id":"4078275266","text":"# imos法 いもす法かと思いきや、いもす法ではO(N+max(A))かかるので間に合わない\n# いもす法では時系列でシミュレートをしているが、その代わりにログイン/ログアウトが起こる\n# イベントの時刻をソートして、前のイベントが起きてから次のイベントが起こるまでの時間は\n# ログイン人数が変わらないことを用いてログイン人数をカウントする。\n# いもす法と同様に、ログイン・ログアウトした人物を区別しないのがややポイント。\n# 計算量はO(NlogN)\nfrom operator import attrgetter\nclass event():\n def __init__(self, time, type):\n self.time = time\n self.type = type\n\nN = int(input())\nevents = []\nfor _ in range(N):\n a, b = [int(s) for s in input().split()]\n events.append(event(a, 'login'))\n events.append(event(b+a, 'logout'))\n \nevents.sort(key=attrgetter('time'))\nans = [0 for _ in range(N+1)]\ncnt = 1\nlast = events[0]\nfor e in events[1:]:\n #print(e.time, e.type)\n if e.type == 'login':\n ans[cnt] += e.time - last.time\n cnt += 1\n else:\n ans[cnt] += e.time - last.time\n cnt -= 1\n last = e\nprint(\" \".join([str(n) for n in ans[1:]]))","repo_name":"alkshmir/atcoder","sub_path":"abc221/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37785807070","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author : yixuan yang\n# @File : CL.py\n\nimport util.basic as basic\nimport util.vo as vo\n\n#if __name__ == \"__main__\":\ndef cl(adjMat, obj, attr):\n\n numObj = len(obj)\n numAttr = len(attr)\n\n bpcObj = basic.BasicCL().getBPCliqueObj(adjMat,obj,attr,numObj,numAttr)\n bpcAttr = basic.BasicCL().getBPCliqueAttr(adjMat,obj,attr,numObj,numAttr)\n\n objResult = basic.BasicCL().objRes(obj,attr,bpcObj,bpcAttr)\n\n bp = basic.BasicCL().finalBpcAll(objResult,bpcObj, bpcAttr)\n\n bpCliques = bp.__getitem__(0)\n\n attrResult = bp.__getitem__(1)\n\n\n unspcBpcliques = bpCliques.copy()\n\n spcObj = []\n for i in range(len(obj)):\n spcObj.append(obj.__getitem__(i))\n spcAttr = []\n for i in range(len(attr)):\n spcAttr.append(attr.__getitem__(i))\n spcObj = tuple(spcObj)\n spcAttr = tuple(spcAttr)\n\n spcC1 = vo.Pair(spcObj,())\n spcC2 = vo.Pair((),spcAttr)\n bpCliques.append(spcC1)\n bpCliques.append(spcC2)\n\n objResult.add(spcObj)\n objResult.add(tuple())\n\n attrResult.add(spcAttr)\n attrResult.add(tuple())\n\n\n #for temp in bpCliques:\n #print(temp.getL(),\"#\",temp.getR())\n\n\n #print(objResult)\n return objResult, attrResult, bpCliques, bpcObj, bpcAttr\n\n\n\n\n","repo_name":"yyxlucky/butterfly","sub_path":"CL.py","file_name":"CL.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17092546293","text":"class Trigger(object):\n\n\tdef __init__(self, editor):\n\t\tself.__init_attributes(editor)\n\t\tself.__sigid1 = editor.connect(\"quit\", self.__quit_cb)\n\t\tself.__sigid2 = editor.connect(\"supported-encodings-window\", self.__activate_cb)\n\t\teditor.register_object(self)\n\n\tdef __init_attributes(self, editor):\n\t\tself.__editor = editor\n\t\tself.__manager = None\n\t\treturn\n\n\tdef __destroy(self):\n\t\tif self.__manager: self.__manager.destroy()\n\t\tself.__editor.disconnect_signal(self.__sigid1, self.__editor)\n\t\tself.__editor.disconnect_signal(self.__sigid2, self.__editor)\n\t\tself.__editor.unregister_object(self)\n\t\tdel self\n\t\tself = None\n\t\treturn False\n\n\tdef 
__activate(self):\n\t\ttry:\n\t\t\tself.__manager.activate()\n\t\texcept AttributeError:\n\t\t\tfrom Manager import Manager\n\t\t\tself.__manager = Manager(self.__editor)\n\t\t\tself.__manager.activate()\n\t\treturn False\n\n\tdef __quit_cb(self, *args):\n\t\tself.__destroy()\n\t\treturn False\n\n\tdef __activate_cb(self, *args):\n\t\tfrom gobject import idle_add\n\t\tidle_add(self.__activate, priority=9999)\n\t\treturn False\n","repo_name":"mystilleef/scribes","sub_path":"SCRIBES/EncodingSystem/SupportedEncodings/Trigger.py","file_name":"Trigger.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"24794166866","text":"github_info = {\n \"user_name\": \"OWNER_OF_REPOSITORY\",\n \"repo_name\": \"NAME_OF_REPOSITORY\"\n}\n\ntoken = {\n \"Slack\": \"YOUR_SLACK_API_TOKEN\",\n \"GitHub\": \"YOUR_GITHUB_API_TOKEN\"\n}\n\nslack_channel = \"#YOUR_SLACK_CHANNEL\"\n\nscope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\"\n]","repo_name":"0417taehyun/we-ake-up","sub_path":"config_example.py","file_name":"config_example.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"233304938","text":"from datetime import datetime\nfrom api.models import Ratings, db\nfrom flask import request\nfrom flask_restx import Namespace, Resource\n\n\napi = Namespace(\"ratings\", description=\"rating for sellers\")\n\n\n@api.route(\"/\")\nclass RatingRoute(Resource):\n def post(self, seller_email):\n\n req_data = request.get_json()\n _buyer_email = req_data.get(\"buyer_email\")\n _rating_desc = req_data.get(\"rating_desc\")\n _rating = req_data.get(\"rating\")\n\n try:\n newRating = Ratings(\n buyer_email=_buyer_email,\n seller_email=seller_email,\n date=datetime.now(),\n rating=_rating,\n rating_desc=_rating_desc,\n )\n newRating.save()\n return {\"success\": True, \"msg\": \"Rating created\"}, 200\n except:\n return {\"success\": False, \"msg\": \"Rating not created\"}, 400\n\n","repo_name":"joowy/NittanyMarket","sub_path":"server/api/routes/rating_route.py","file_name":"rating_route.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16600203598","text":"# Day X\r\n\r\n# Description\r\n\r\nimport os\r\nimport sys\r\nfrom collections import Counter\r\nfrom pprint import pprint\r\n\r\n__inputfile__ = 'Day-21-input.txt'\r\n__location__ = os.path.join(sys.path[0], __inputfile__)\r\n\r\nwith open(__location__, 'r') as f:\r\n input_str = f.read().strip() # Takes the inputfile as a string\r\n\r\ntest = '''\\\r\nmxmxvkd kfcds sqjhc nhms (contains dairy, fish)\r\ntrh fvjkl sbzzf mxmxvkd (contains dairy)\r\nsqjhc fvjkl (contains soy)\r\nsqjhc mxmxvkd sbzzf (contains fish)'''\r\n\r\ndef parse(s):\r\n ret = []\r\n for food in s.split('\\n'):\r\n ing, ale = food.split(' (contains ')\r\n ing = set(ing.split(' '))\r\n ale = ale[:-1].split(', ')\r\n ret.append((ing,ale))\r\n return ret\r\n\r\ndef match_allergens(foods):\r\n allergens = {}\r\n all_ings = Counter()\r\n for food in foods:\r\n ings, ales = food\r\n for a in ales:\r\n #print(a)\r\n if a not in allergens:\r\n allergens[a] = ings\r\n else:\r\n allergens[a] = allergens[a].intersection(ings)\r\n all_ings.update(ings)\r\n pprint(allergens)\r\n\r\n return allergens, all_ings\r\n\r\n\r\ndef part1(allergens, all_ings):\r\n for a in 
allergens:\r\n for w in allergens[a]:\r\n all_ings.pop(w) if w in all_ings else None\r\n \r\n return sum(all_ings.values())\r\n\r\ndef part2(allergens):\r\n ings_with_all = {}\r\n\r\n while allergens:\r\n for a, i in allergens.items():\r\n if len(i) == 1:\r\n ings_with_all[a] = i\r\n del allergens[a]\r\n break\r\n seen = set().union(*ings_with_all.values())\r\n for a in allergens:\r\n allergens[a] -= seen\r\n \r\n\r\n return ','.join(list(k)[0] for k in list(zip(*sorted(ings_with_all.items())))[1])\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n inp = parse(input_str)\r\n alls, count_ings = match_allergens(inp)\r\n print(\"Part 1:\")\r\n pprint(part1(alls, count_ings))\r\n print(\"Part 2:\")\r\n print(part2(alls))\r\n \r\n","repo_name":"dotzo/AdventOfCode2020","sub_path":"AdventOfCode2020/Day-21.py","file_name":"Day-21.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11478914499","text":"#!/usr/bin/python\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\n'''\n\nEXAMPLES = \"\"\"\n- name: test base module\n routeros_system_ntp_client:\n enabled: True\n server_dns_names:\n - ntp.nict.jp\n\n- name: test base module\n routeros_system_ntp_client:\n enabled: False\n server_dns_names:\n - \"\"\n\n- name: test base module\n routeros_system_ntp_client:\n enabled: True\n primary-ntp: 192.168.0.1\n secondary-ntp: 192.168.0.1\n\n- name: test base module\n routeros_system_ntp_client:\n enabled: False\n primary-ntp: 0.0.0.0\n secondary-ntp: \"\"\n\"\"\"\n\nRETURN = \"\"\"\n\"\"\"\nimport re\n\nfrom ansible_collections.community.network.plugins.module_utils.network.routeros.routeros import run_commands\nfrom ansible_collections.community.network.plugins.module_utils.network.routeros.routeros import routeros_argument_spec\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.six import string_types\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, string_types):\n item = str(item).split('\\n')\n yield item\n\ndef cleaning_output(respons):\n list_result = list()\n list_temp = respons.splitlines()\n for temp in list_temp:\n if temp.find('[') != 0:\n list_result.append(temp)\n if len(list_result) == 0:\n list_result.append('')\n return list_result \n\ndef check_exec_error(respons):\n list_error_message = list()\n list_error_string = [\n 'bad command name',\n 'no such item',\n 'expected end of command',\n 'syntax error',\n 'invalid value for argument'\n ]\n for temp in respons:\n for error_string in list_error_string:\n if temp.find(error_string) == 0:\n list_error_message.append('ERROR: ' + temp)\n return list_error_message\n\ndef get_param(module):\n dict_param = dict()\n list_param = ['enabled', 'primary_ntp', 'secondary_ntp', 'server_dns_names']\n list_param_conv = ['enabled', 'primary-ntp', 'secondary-ntp', 'server-dns-names']\n\n for num in range(len(list_param)):\n if list_param[num] in module.params:\n if type(module.params[list_param[num]]) == bool:\n if module.params[list_param[num]]:\n dict_param[list_param_conv[num]] = 'yes'\n else:\n dict_param[list_param_conv[num]] = 'no'\n else:\n dict_param[list_param_conv[num]] = module.params[list_param[num]]\n\n if 'primary-ntp' in dict_param:\n if dict_param['primary-ntp'] == '':\n dict_param['primary-ntp'] = '0.0.0.0'\n\n if 
'secondary-ntp' in dict_param:\n if dict_param['secondary-ntp'] == '':\n dict_param['secondary-ntp'] = '0.0.0.0'\n\n return dict_param\n\ndef parse_output_system_ntp_client(list_output):\n dict_ntp_client = dict()\n list_param = ['enabled', 'primary-ntp', 'secondary-ntp', 'server-dns-names']\n\n for param in list_param:\n for output in list_output:\n mo = re.search(r'\\s' + param + ':\\s(.*)$', ' ' + output)\n if mo:\n dict_ntp_client[param] = mo.group(1)\n\n if len(dict_ntp_client['server-dns-names']) > 0:\n dict_ntp_client['server-dns-names'] = dict_ntp_client['server-dns-names'].split(',')\n\n return dict_ntp_client\n\ndef make_command_system_ntp_client(dict_param, dict_object):\n command = ''\n list_param = ['enabled', 'primary-ntp', 'secondary-ntp', 'server-dns-names']\n command_option = ''\n for param in list_param:\n if param in dict_param:\n if type(dict_param[param]) == list:\n if sorted(dict_param[param]) != sorted(dict_object[param]):\n temp = ''\n for item in dict_param[param]:\n temp = temp + ',' + item\n command_option = command_option + ' ' + param + '=\\\"' + temp[1:] + '\\\"'\n elif type(dict_param[param]) == str:\n if dict_param[param] != dict_object[param]:\n command_option = command_option + ' ' + param + '=\\\"' + dict_param[param] + '\\\"'\n else:\n if dict_param[param] != dict_object[param]:\n command_option = command_option + ' ' + param + '=' + dict_param[param]\n\n if command_option.strip() != '':\n command = '/system ntp client set' + command_option \n\n return command\n\ndef main():\n \"\"\"main entry point for module execution\n \"\"\"\n #### argument spec\n argument_spec = dict(\n enabled=dict(type='bool'),\n primary_ntp=dict(type='str'),\n secondary_ntp=dict(type='str'),\n server_dns_names=dict(type='list', elements='str')\n )\n # required=True\n # type='list', elements='int'\n # default='xxxx'\n # choices=[\"present\", \"absent\"]\n\n argument_spec.update(routeros_argument_spec)\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n result = {'changed': False}\n\n #### initialize\n changed_status = False\n failed_status = False\n list_exec = list()\n list_log = list()\n dict_param = dict()\n dict_object = dict()\n\n #### get parameter\n dict_param = get_param(module)\n \n #### exec get command\n commands = '/system ntp client print without-paging'\n responses = run_commands(module, commands)\n list_output = cleaning_output(responses[0])\n list_exec.append({'commands':commands, 'stdout':list_output})\n\n #### parse output\n dict_object = parse_output_system_ntp_client(list_output)\n\n #### make commad\n set_commands = make_command_system_ntp_client(dict_param, dict_object)\n\n #### check error\n error_messages = check_exec_error(list_output)\n if len(error_messages) > 0:\n msg = error_messages[0]\n module.fail_json(msg=msg, failed_conditions=list_exec)\n\n #### check mode and not changed\n if module.check_mode:\n list_log.append('INFO: CheckMode = True')\n if module.check_mode or set_commands == '':\n results = list()\n if set_commands != '':\n list_exec.append({'commands':set_commands, 'stdout':''})\n for exec_output in list_exec:\n for output in exec_output['stdout']:\n results.append(output)\n result.update({\n 'changed': changed_status,\n 'failed': failed_status,\n 'parameter': dict_param,\n 'object': dict_object,\n 'stdout': list_exec,\n 'stdout_lines': list(to_lines(results)),\n 'log': list_log\n }) \n module.exit_json(**result) \n\n #### exec set command\n commands = set_commands\n responses = run_commands(module, 
commands)\n list_output = cleaning_output(responses[0])\n list_exec.append({'commands':commands, 'stdout':list_output})\n\n #### check error\n if list_output[0] != '':\n msg = 'ERROR: ' + list_output[0]\n module.fail_json(msg=msg, failed_conditions=list_exec) \n else:\n changed_status = True\n\n #### return result\n results = list()\n for exec_output in list_exec:\n for output in exec_output['stdout']:\n results.append(output)\n result.update({\n 'changed': changed_status,\n 'failed': failed_status,\n 'parameter': dict_param,\n 'object': dict_object,\n 'stdout': list_exec,\n 'stdout_lines': list(to_lines(results)),\n 'log': list_log\n })\n\n module.exit_json(**result)\n\nif __name__ == '__main__':\n main()\n","repo_name":"likeuu-user/ansible_routeros","sub_path":"library/routeros_system_ntp_client.py","file_name":"routeros_system_ntp_client.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4055719472","text":"import sys\n\nclass Stack(object):\n def __init__(self):\n self.stack = []\n self.min = sys.maxsize\n\n def push(self, x):\n if x < self.min:\n # new minimum: push an encoded value (2*x - old_min < x) and then update min\n self.stack.append(2*x - self.min)\n self.min = x\n else:\n self.stack.append(x)\n \n def pop(self):\n x = self.stack.pop()\n if x < self.min:\n # encoded value: the popped element was the minimum; restore the previous one\n min_copy = self.min\n self.min = (2*self.min) - x\n return min_copy\n else:\n return x\n\n def getMin(self):\n return self.min\n\n def __str__(self):\n return str(self.stack)\n\ns = Stack()\nprint(s.push(3))\nprint(s.push(5))\nprint(s.getMin())\nprint(s.push(2))\nprint(s.push(1))\nprint(s.getMin())\nprint(s.pop())\nprint(s.getMin())\nprint(s.pop())\nprint(s.pop())","repo_name":"shrikantchine/algorithm-practice","sub_path":"stack_and_queue/min_elem_from_stack.py","file_name":"min_elem_from_stack.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16547969326","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n# import os\n# location=os.getcwd()\n\n# def chrome_setup():\n# from selenium.webdriver.chrome.service import Service\n# serv_obj=Service(\"C:\\Drivers\\chromedriver_win32\\chromedriver.exe\")\n#\n# \"\"\"download file in your desired location \"\"\"\n# # preferences = {\"download.default_directory\":location} # save files in desired location\n# # preferences = {\"download.default_directory\":\"C:\\Users\\Mansi Patel\\PycharmProjects\\pythonProject\\selenium_python\\day1\"}\n#\n# preferences = {\"download.default_directory\":location}\n# ops=webdriver.ChromeOptions()\n# ops.add_experimental_option(\"prefs\", preferences) # desired location\n#\n# driver=webdriver.Chrome(service=serv_obj,options=ops)\n# return driver\n#\n# driver=chrome_setup()\n#\n# driver.get(\"https://file-examples.com/index.php/sample-documents-download/sample-doc-download/\")\n# driver.maximize_window()\n# driver.implicitly_wait(10)\n# driver.find_element(By.XPATH,\"//tbody/tr[1]/td[5]/a[1]\").click()\n# time.sleep(20)\n\n\"\"\"for firefox browser\"\"\"\n\ndef firefox_setup():\n from selenium.webdriver.firefox.service import Service\n serv_obj=Service(\"C:\\Drivers\\geckodriver-v0.32.0-win-aarch64\\geckodriver.exe\")\n\n # settings\n ops=webdriver.FirefoxOptions()\n ops.set_preference(\"browser.helperApps.neverAsk.saveToDisk\",\"application/msword\")\n ops.set_preference(\"browser.download.manager.showWhenStarting\", False)\n 
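# \"application/msword\" above is the MIME type of Word .doc files; Firefox then saves such downloads without showing the save-as dialog\n 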
driver=webdriver.Firefox(service=serv_obj,options=ops)\n return driver\n\ndriver=firefox_setup()\n\ndriver.get(\"https://file-examples.com/index.php/sample-documents-download/sample-doc-download/\")\ndriver.maximize_window()\ndriver.implicitly_wait(10)\ndriver.find_element(By.XPATH,\"//tbody/tr[1]/td[5]/a[1]\").click()\ntime.sleep(20)\n","repo_name":"MansiPatelcs/PythonAutomation","sub_path":"day1/fileDownload1.py","file_name":"fileDownload1.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38450484554","text":"from pprint import pprint\n\n# debug mode?\nDEBUG = False\n\n\nclass bcolors:\n \"\"\" ASCII Colors for printing\"\"\"\n\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n NONE = ''\n\n\ndef my_str_trim(s: str) -> str:\n \"\"\"returns the string with the first and last char removed\"\"\"\n\n return s[1:len(s) - 1]\n\n\ndef prin(s: str, param='', color: bcolors = bcolors.NONE) -> None:\n \"\"\"Only prints if debug is enabled, takes color parameters\"\"\"\n if DEBUG:\n if color:\n print(color)\n if param:\n pprint(s, param)\n else:\n pprint(s)\n if color:\n print(bcolors.ENDC)\n","repo_name":"Schippmunk/AssemblyStatAna","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35477943025","text":"__author__ = 'diablo'\nimport math\nfrom astar import *\n\n\nclass Triangle(object):\n def __init__(self, node1, node2, node3):\n self.d = 5\n self.node1 = node1\n self.node2 = node2\n self.node3 = node3\n self.v21 = np.array([(node2.x_n - node1.x_n), (node2.y_n - node1.y_n)])\n self.v21_incline = math.atan2(self.v21[1], self.v21[0])\n self.v21_incline2 = (self.node2.y_n - self.node1.y_n) / (self.node2.x_n - self.node1.x_n)\n self.v12 = np.array([(node1.x_n - node2.x_n), (node1.y_n - node2.y_n)])\n self.v12_incline = math.atan2(self.v12[1], self.v12[0])\n self.v12_incline2 = (self.node1.y_n - self.node2.y_n) / (self.node1.x_n - self.node2.x_n)\n self.v12_incline3 = (self.node1.y_n - self.node2.y_n) / (self.node1.x_n - self.node2.x_n) - 0.5\n self.v31 = np.array([(node3.x_n - node1.x_n), (node3.y_n - node1.y_n)])\n self.v31_incline = math.atan2(self.v31[1], self.v31[0])\n self.v13 = np.array([(node1.x_n - node3.x_n), (node1.y_n - node3.y_n)])\n self.v13_incline = math.atan2(self.v13[1], self.v13[0])\n self.v32 = np.array([(node3.x_n - node2.x_n), (node3.y_n - node2.y_n)])\n self.v32_incline = math.atan2(self.v32[1], self.v32[0])\n self.v23 = np.array([(node2.x_n - node3.x_n), (node2.y_n - node3.y_n)])\n self.v23_incline = math.atan2(self.v23[1], self.v23[0])\n self.v23_incline2 = (self.node2.y_n - self.node3.y_n) / (self.node2.x_n - self.node3.x_n)\n self.v32_v21_angle = np.abs(np.abs((self.v32_incline - self.v21_incline) * 180 / np.pi) - 180)\n self.v31_v23_angle = np.abs(np.abs((self.v31_incline - self.v23_incline) * 180 / np.pi) - 180)\n self.v13_v21_angle = np.abs(np.abs((self.v13_incline - self.v21_incline) * 180 / np.pi) - 180)\n self.desc_angle_u = self.v32_v21_angle - self.v32_v21_angle % self.d\n self.desc_angle_l = self.v32_v21_angle - self.v32_v21_angle % self.d + self.d\n self.angle_change = self.v32_v21_angle % self.d\n # self.incline_change = self.v32_v21_angle%self.d*np.pi/180\n self.incline_change = self.d * 
np.pi / 180\n self.desc_incline_u = self.v21_incline + self.incline_change\n self.desc_incline_l = self.v21_incline - self.incline_change\n self.desc_incline_u2 = self.v21_incline2 + self.incline_change\n self.desc_incline_l2 = self.v21_incline2 - self.incline_change\n self.m1 = (self.node2.y_n - self.node1.y_n) / (self.node2.x_n - self.node1.x_n)\n self.m2 = (self.node3.y_n - self.node2.y_n) / (self.node3.x_n - self.node2.x_n)\n\n self.this_node = None\n\n\ndef line_intersect(node1, node2, incline1, incline2):\n A = (-incline2 * node2.x_n + incline1 * node1.x_n - node1.y_n + node2.y_n) / (incline1 - incline2)\n x = A\n y = incline1 * (A - node1.x_n) + node1.y_n\n return (x, y)\n\n\nclass Desc_path(object):\n def __init__(self, initial_answer_path, obstacles):\n self.initial_answer_path = initial_answer_path\n self.obstacles = obstacles\n self.open_set = []\n self.final_edges = []\n\n def decs_edges(self):\n init = self.initial_answer_path[0]\n final = node_copy(self.initial_answer_path[-1])\n self.this_node = init\n self.initial_answer_path.append(final)\n for i in self.initial_answer_path: # seach through all points of trivial path\n triangle_temp = None\n j = i.parent_node\n if j is not None: # introducing\n k = j.parent_node\n node_temp = None\n if k is not None:\n for m in ('u', 'l'):\n if self.tri(i, m):\n break\n return self.open_set\n\n def tri(self, node3, u_or_l):\n triangle_temp = Triangle(self.this_node, node3.parent_node, node3) # introducing nodes 1 to 3 as triangle\n if u_or_l == 'l':\n line_temp = line_intersect(node3.parent_node.parent_node, node3, triangle_temp.desc_incline_l2,\n triangle_temp.v23_incline2) # getting coordination of the node on the v23\n if u_or_l == 'u':\n line_temp = line_intersect(node3.parent_node.parent_node, node3, triangle_temp.desc_incline_u2,\n triangle_temp.v23_incline2) # getting coordination of the node on the v23\n node_temp = Node(line_temp[0], line_temp[1])\n counter = 0 # checking to see the new edge doesn't intesect with the objects\n for l in range(len(self.obstacles.obstacles)):\n if not free_edge(node3.parent_node, node_temp, self.obstacles.obstacles[l]):\n return False\n counter += 1\n corner = None\n if counter == len(self.obstacles.obstacles): # check ends here\n self.open_set.append(node_temp)\n node_temp.parent_node = self.this_node\n this_edge = Edge(self.this_node, node_temp)\n node_temp.parent_edge = this_edge\n self.this_node = node_temp\n return True\n\n\nclass Corner(object):\n def __init__(self, triangle, obstacles):\n self.triangle = triangle\n self.cnode = triangle.node2\n self.node3 = triangle.node3\n self.node1 = triangle.node1\n self.angle = triangle.desc_incline_l2 # there is a need to decide between u2 or l2\n self.obstacles = obstacles\n self.distance = None\n self.node21 = None\n self.node23 = None\n self.node21_c = None\n self.node23_c = None\n self.edge12 = np.sqrt((self.node1.x_n - self.cnode.x_n) ** 2 + (self.node1.y_n - self.cnode.y_n) ** 2)\n self.edge23 = np.sqrt((self.node3.x_n - self.cnode.x_n) ** 2 + (self.node3.y_n - self.cnode.y_n) ** 2)\n\n def node_by_distance(self):\n dd = 0.5\n d = 0.005\n free_edge_ = True\n while free_edge_:\n self.node21 = node_angle_b(self.cnode, self.node1, self.triangle.v12_incline2, d)\n self.node23 = node_angle_b(self.cnode, self.node3, self.triangle.v23_incline2, d)\n self.node21_c = np.sqrt((self.node21.x_n - self.cnode.x_n) ** 2 + (self.node21.y_n - self.cnode.y_n) ** 2)\n self.node23_c = np.sqrt((self.node23.x_n - self.cnode.x_n) ** 2 + (self.node23.y_n - self.cnode.y_n) 
** 2)\n counter = 0\n for j in self.obstacles.obstacles:\n free_edge_ = free_edge(self.node21, self.node23, j)\n if not free_edge_:\n continue\n counter += 1\n if counter == len(self.obstacles.obstacles) and d < self.edge12 and d < self.edge23:\n d += dd\n else:\n break\n self.distance = d\n\n\ndef node_angle_b(cnode, anode, incline, distance):\n a1 = (distance ** 2 / (incline ** 2 + 1.0)) ** (0.5)\n a2 = incline * (distance ** 2 / (incline ** 2 + 1.0)) ** (0.5)\n dx = cnode.x_n - anode.x_n\n dy = cnode.y_n - anode.y_n\n if 0 < dx and dy > 0:\n x1, y1 = -a1 + cnode.x_n, -a2 + cnode.y_n\n if dx < 0 < dy:\n x1, y1 = a1 + cnode.x_n, a2 + cnode.y_n\n if dx < 0 and dy < 0:\n x1, y1 = a1 + cnode.x_n, a2 + cnode.y_n\n if dx > 0 and dy < 0:\n x1, y1 = -a1 + cnode.x_n, -a2 + cnode.y_n\n return Node(x1, y1)\n\n\ndef node_angle_n(cnode, anode, incline, distance):\n a1 = (distance ** 2 / (incline ** 2 + 1.0)) ** (0.5)\n a2 = incline * (distance ** 2 / (incline ** 2 + 1.0)) ** (0.5)\n dx = cnode.x_n - anode.x_n\n dy = cnode.y_n - anode.y_n\n if 0 < dx and dy > 0:\n x1, y1 = -a1 + cnode.x_n, -a2 + cnode.y_n\n if dx < 0 < dy:\n x1, y1 = a1 + cnode.x_n, a2 + cnode.y_n\n if dx < 0 and dy < 0:\n x1, y1 = a1 + cnode.x_n, a2 + cnode.y_n\n if dx > 0 > dy:\n x1, y1 = a1 + cnode.x_n, a2 + cnode.y_n\n return Node(x1, y1)\n\n\nclass Corner_triangle(object):\n def __init__(self, disc_path, obstacles):\n self.disc_path = disc_path\n self.obstacles = obstacles\n\n def triangle(self):\n for i in self.disc_path: # seach through all points of trivial path\n j = i.parent_node\n if j is not None: # introducing\n k = j.parent_node\n if k is not None:\n i.parent_node.corner = Corner(Triangle(i.parent_node.parent_node, i.parent_node, i), self.obstacles)\n i.parent_node.corner.node_by_distance()\n","repo_name":"RezaSaidafkan/Path_Planning_Framework","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"29984941510","text":"\"\"\" Python 3 program to remove a given \r\nelement from an array\r\nwhether from beginning or any position\"\"\"\r\n\r\n# function to remove an element x from arr[] \r\n\"\"\"since size of array can't be changed after \r\nremoving therefore reduce the \r\nsize if the element to be removed is found\"\"\"\r\ndef deleteElement(arr, n, x): \r\n\t\r\n\t# first Search x in array \r\n\tfor i in range(n): \r\n\t\tif (arr[i] == x): \r\n\t\t\tbreak\r\n\r\n\t# If x found in array \r\n\tif (i < n): \r\n\t\t\r\n\t\t# reduce size of array and move \r\n\t\t# all elements one space ahead \r\n\t\tn = n - 1; \r\n\t\tfor j in range(i, n, 1): \r\n\t\t\tarr[j] = arr[j + 1] \r\n\r\n\treturn n \r\n\r\n# Driver Code \r\nif __name__ == '__main__': \r\n\tarr = [11, 15, 6, 8, 9, 10] \r\n\tn = len(arr) \r\n\tx = int(input())\r\n\r\n\t# Delete x from arr[] \r\n\tn = deleteElement(arr, n, x) \r\n\r\n\tprint(\"Array after deletion of\",x) \r\n\tfor i in range(n): \r\n\t\tprint(arr[i], end = \" \") \r\n\t\t\r\n","repo_name":"Nishtha096/letsupgrade_assignments","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71262463824","text":"\"\"\"\nHomework12.\n\nDescription:\n Given list of list of list etc of integers\n write recursive function that will accept as argument target list\n and return sum of all integers inside it\n Input: [[[[1, 4, 5], [[6, 
9],[[[8, 1], 7], 3], 2], 7], 5, 2], 9, [1, 2]]\n Output: Target sum = 72\n\"\"\"\n\ninput_l = [[[[1, 4, 5], [[6, 9], [[[8, 1], 7], 3], 2], 7], 5, 2], 9, [1, 2]]\n\n\ndef get_sum(target_list):\n \"\"\"\n Recursive function that accepts target list as argument.\n\n Args:\n target_list (type == list): should contain only lists\n\n Returns:\n total\n \"\"\"\n total = 0\n for el in target_list:\n total += get_sum(el) if type(el) == list else el\n return total\n\n\nprint(f'Target sum = {get_sum(input_l)}')\n","repo_name":"Ihor-Kaz/My_Homework","sub_path":"homework12_recurs.py","file_name":"homework12_recurs.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1224752770","text":"#!/usr/bin/env python\n'''A module for reading in Blender Scene Descriptions from yaml.\n\n'''\n\nimport os\nimport sys\n# pylint: disable=import-error\nimport bpy\n\nCURRENT_DIRECTORY = os.getcwd()\nif not CURRENT_DIRECTORY in sys.path:\n sys.path.append(CURRENT_DIRECTORY)\n\n# pylint: disable=wrong-import-position\nimport blender_utils\nimport utils\n\nCOMMON_LIGHT_SETTINGS = [\n 'shadow_adaptive_threshold',\n 'shadow_buffer_bias',\n 'shadow_buffer_bleed_bias',\n 'shadow_buffer_clip_end',\n 'shadow_buffer_clip_start',\n 'shadow_buffer_samples',\n 'shadow_buffer_size',\n 'shadow_buffer_soft',\n 'shadow_buffer_type',\n 'shadow_color',\n 'shadow_filter_type',\n 'shadow_method',\n 'shadow_ray_sample_method',\n 'shadow_ray_samples',\n 'shadow_sample_buffers',\n 'shadow_soft_size',\n 'use_auto_clip_end',\n 'use_auto_clip_start',\n 'use_only_shadow',\n 'use_shadow',\n 'use_shadow_layer'\n] # yapf: disable\n\n\ndef add_point_light(light):\n '''https://docs.blender.org/api/current/bpy.types.PointLamp.html\n\n '''\n bpy.ops.object.lamp_add(type = 'POINT', location = blender_utils.pose_to_vec(light['pose']))\n ALL_SETTINGS = COMMON_LIGHT_SETTINGS + [\n 'compression_threshold',\n 'constant_coefficient',\n 'falloff_curve',\n 'falloff_type',\n 'ge_shadow_buffer_type',\n 'linear_attenuation',\n 'linear_coefficient',\n 'quadratic_attenuation',\n 'quadratic_coefficient',\n 'use_sphere'\n ] # yapf: disable\n for attr in ALL_SETTINGS:\n if attr in light:\n setattr(bpy.context.active_object.data, attr, light[attr])\n return\n\n\ndef add_sun_light(light):\n '''https://docs.blender.org/api/current/bpy.types.SunLamp.html\n\n '''\n bpy.ops.object.lamp_add(type = 'SUN', location = blender_utils.pose_to_vec(light['pose']))\n ALL_SETTINGS = COMMON_LIGHT_SETTINGS + [\n 'compression_threshold',\n 'ge_shadow_buffer_type',\n 'shadow_frustum_size',\n 'show_shadow_box',\n ]\n for attr in ALL_SETTINGS:\n if attr in light:\n setattr(bpy.context.active_object.data, attr, light[attr])\n return\n\n\ndef add_spot_light(light):\n '''https://docs.blender.org/api/current/bpy.types.SpotLamp.html\n\n '''\n bpy.ops.object.lamp_add(type = 'SPOT', location = blender_utils.pose_to_vec(light['pose']))\n bpy.context.active_object.rotation_mode = 'QUATERNION'\n bpy.context.active_object.rotation_quaternion = blender_utils.pose_to_quat(light['pose'])\n ALL_SETTINGS = COMMON_LIGHT_SETTINGS + [\n 'compression_threshold',\n 'constant_coefficient',\n 'falloff_curve',\n 'falloff_type',\n 'ge_shadow_buffer_type',\n 'halo_intensity',\n 'halo_step',\n 'linear_attenuation',\n 'linear_coefficient',\n 'quadratic_attenuation',\n 'quadratic_coefficient',\n 'show_cone',\n 'spot_blend',\n 'spot_size',\n 'use_halo',\n 'use_sphere',\n 'use_square',\n ] # yapf: disable\n for attr in 
ALL_SETTINGS:\n if attr in light:\n setattr(bpy.context.active_object.data, attr, light[attr])\n return\n\n\ndef add_hemi_light(light):\n '''https://docs.blender.org/api/current/bpy.types.HemiLamp.html\n\n '''\n bpy.ops.object.lamp_add(type = 'HEMI', location = blender_utils.pose_to_vec(light['pose']))\n return\n\n\ndef add_area_light(light):\n '''https://docs.blender.org/api/current/bpy.types.AreaLamp.html\n\n '''\n bpy.ops.object.lamp_add(type = 'AREA', location = blender_utils.pose_to_vec(light['pose']))\n bpy.context.active_object.rotation_mode = 'QUATERNION'\n bpy.context.active_object.rotation_quaternion = blender_utils.pose_to_quat(light['pose'])\n ALL_SETTINGS = COMMON_LIGHT_SETTINGS + [\n 'compression_threshold',\n 'gamma',\n 'ge_shadow_buffer_type',\n 'shape',\n 'size',\n 'size_y',\n 'use_dither',\n 'use_jitter',\n 'use_umbra'\n ] # yapf: disable\n for attr in ALL_SETTINGS:\n if attr in light:\n setattr(bpy.context.active_object.data, attr, light[attr])\n return\n\n\nLIGHT_MAP = {\n 'point': add_point_light,\n 'sun': add_sun_light,\n 'spot': add_spot_light,\n 'hemi': add_hemi_light,\n 'area': add_area_light,\n}\n\n\ndef add_light(light):\n LIGHT_MAP[light['type'].lower()](light)\n # Set the common light settings:\n ALL_SETTINGS = [\n 'color',\n 'distance',\n 'energy',\n 'use_diffuse',\n 'use_negative',\n 'use_nodes',\n 'use_own_layer',\n 'use_specular'\n ] # yapf: disable\n for attr in ALL_SETTINGS:\n if attr in light:\n setattr(bpy.context.active_object.data, attr, light[attr])\n return\n\n\ndef add_camera(camera):\n blender_utils.add_camera(blender_utils.pose_to_vec(camera['pose']), blender_utils.pose_to_quat(camera['pose']))\n\n\ndef set_render_settings(settings):\n '''Settings for rendering. See https://docs.blender.org/api/current/bpy.types.RenderSettings.html for details.\n\n '''\n ALL_SETTINGS = [\n 'alpha_mode',\n 'antialiasing_samples',\n 'border_max_x',\n 'border_max_y',\n 'border_min_x',\n 'border_min_y',\n 'display_mode',\n 'dither_intensity',\n 'edge_color',\n 'edge_threshold',\n 'engine',\n 'field_order',\n 'file_extension',\n 'filepath',\n 'filter_size',\n 'fps',\n 'fps_base',\n 'frame_map_new',\n 'frame_map_old',\n 'has_multiple_engines',\n 'is_movie_format',\n 'line_thickness',\n 'line_thickness_mode',\n 'motion_blur_samples',\n 'motion_blur_shutter',\n 'motion_blur_shutter_curve',\n 'octree_resolution',\n 'pixel_aspect_x',\n 'pixel_aspect_y',\n 'pixel_filter_type',\n 'preview_start_resolution',\n 'raytrace_method',\n 'resolution_percentage',\n 'resolution_x',\n 'resolution_y',\n 'sequencer_gl_preview',\n 'simplify_ao_sss',\n 'simplify_child_particles',\n 'simplify_child_particles_render',\n 'simplify_shadow_samples',\n 'simplify_subdivision',\n 'simplify_subdivision_render',\n 'stamp_background',\n 'stamp_font_size',\n 'stamp_foreground',\n 'stamp_note_text',\n 'threads',\n 'threads_mode',\n 'tile_x',\n 'tile_y',\n 'use_border',\n 'use_compositing',\n 'use_crop_to_border',\n 'use_edge_enhance',\n 'use_envmaps',\n 'use_file_extension',\n 'use_full_sample',\n 'use_local_coords',\n 'use_motion_blur',\n 'use_multiview',\n 'use_overwrite',\n 'use_persistent_data',\n 'use_raytrace',\n 'use_render_cache',\n 'use_shading_nodes',\n 'use_shadows',\n 'use_simplify',\n 'use_single_layer',\n 'use_sss',\n 'use_stamp',\n 'use_stamp_date',\n 'use_stamp_lens',\n 'use_stamp_memory',\n 'use_stamp_render_time',\n 'use_textures',\n 'use_world_space_shading'\n ] # yapf: disable\n\n for attr in ALL_SETTINGS:\n if attr in settings:\n setattr(bpy.context.scene.render, attr, 
settings[attr])\n if 'ffmpeg' in settings:\n set_ffmpeg_settings(settings['ffmpeg'])\n if 'image_format_settings' in settings:\n set_image_format_settings(settings['image_format_settings'])\n\n\ndef set_ffmpeg_settings(settings):\n '''https://docs.blender.org/api/current/bpy.types.FFmpegSettings.html\n\n '''\n\n FFMPEG_SETTINGS = [\n 'audio_bitrate',\n 'audio_channels',\n 'audio_codec',\n 'audio_mixrate',\n 'audio_volume',\n 'buffersize',\n 'codec',\n 'constant_rate_factor',\n 'ffmpeg_preset',\n 'format',\n 'gopsize',\n 'max_b_frames',\n 'maxrate',\n 'minrate',\n 'muxrate',\n 'packetsize',\n 'use_autosplit',\n 'use_lossless_output',\n 'use_max_b_frames',\n 'video_bitrate'\n ] # yapf: disable\n\n for attr in FFMPEG_SETTINGS:\n if attr in settings:\n setattr(bpy.context.scene.render.ffmpeg, attr, settings[attr])\n\n\ndef set_image_format_settings(settings):\n '''https://docs.blender.org/api/current/bpy.types.ImageFormatSettings.html\n\n '''\n\n IMAGE_SETTINGS = [\n 'cineon_black',\n 'cineon_gamma',\n 'cineon_white',\n 'color_depth',\n 'color_mode',\n 'compression',\n 'display_settings',\n 'exr_codec',\n 'file_format',\n 'quality',\n 'stereo_3d_format',\n 'tiff_codec',\n 'use_cineon_log',\n 'use_preview',\n 'use_zbuffer',\n 'view_settings',\n 'views_format'\n ] # yapf: disable\n for attr in IMAGE_SETTINGS:\n if attr in settings:\n setattr(bpy.context.scene.render.image_format_settings, attr, settings[attr])\n\n\ndef set_light_settings(settings):\n '''Settings for world lighting. See https://docs.blender.org/api/current/bpy.types.WorldLighting.html for details.\n\n '''\n ALL_SETTINGS = [\n 'adapt_to_speed',\n 'ao_blend_type',\n 'ao_factor',\n 'bias',\n 'correction',\n 'distance',\n 'environment_color',\n 'environment_energy',\n 'error_threshold',\n 'falloff_strength',\n 'gather_method',\n 'indirect_bounces',\n 'indirect_factor',\n 'passes',\n 'sample_method',\n 'samples',\n 'threshold',\n 'use_ambient_occlusion',\n 'use_cache',\n 'use_environment_light',\n 'use_indirect_light',\n 'use_falloff'\n ] # yapf: disable\n\n if not bpy.data.worlds:\n bpy.ops.world.new()\n for attr in ALL_SETTINGS:\n if attr in settings:\n setattr(bpy.data.worlds['World'].light_settings, attr, settings[attr])\n\n\ndef set_world_settings(settings):\n '''Settings for the world. 
See https://docs.blender.org/api/current/bpy.types.World.html for details.\n '''\n ALL_SETTINGS = [\n 'active_texture_index',\n 'ambient_color',\n 'color_range',\n 'exposure',\n 'horizon_color',\n 'use_sky_blend',\n 'use_sky_paper',\n 'use_sky_real',\n 'zenith_color'\n ] # yapf: disable\n if not bpy.data.worlds:\n bpy.ops.world.new()\n for attr in ALL_SETTINGS:\n if attr in settings:\n setattr(bpy.data.worlds['World'], attr, settings[attr])\n\n\ndef add_blender_scene(scenefile):\n scene = utils.read_yaml_data(scenefile)\n for light in scene.get('lights', []):\n add_light(light)\n if 'camera' in scene:\n add_camera(scene['camera'])\n set_render_settings(scene.get('render', {}))\n set_light_settings(scene.get('light_settings', {}))\n set_world_settings(scene.get('world_settings', {}))\n # TODO: add various other objects to the scene\n","repo_name":"KavrakiLab/robowflex","sub_path":"robowflex_visualization/old/blender_render_scene.py","file_name":"blender_render_scene.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"47"} +{"seq_id":"24611376054","text":"from nornir import InitNornir\nfrom nornir_utils.plugins.functions import print_result, print_title\nfrom nornir_netmiko import netmiko_send_command, netmiko_send_config\nfrom nornir.core.filter import F\n\nnr = InitNornir(\n config_file=\"config.yml\"\n)\n\ndef config(push):\n push.run(task=netmiko_send_config, config_file=\"push-config.txt\")\n push.run(task=netmiko_send_command, command_string = \"sh run | begin line \")\n push.run(task=netmiko_send_command, command_string = \"wr mem\")\n\ndevices = nr.filter(F(groups__any=[\"AS65000\", \"ISP\", \"EIGRP700\"]))\n\nresults = devices.run(task = config)\n\nprint_title(\"Deploying Configuration\")\nprint_result(results)\n\n","repo_name":"rogerperkin/network-programmability","sub_path":"SCRIPTS/Nornir/push-config.py","file_name":"push-config.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"47"} +{"seq_id":"73565238223","text":"a = 'Aa'\r\nb = 'Aa'\r\nh = 'heterozygous'\r\no = 'homozygous dominant'\r\nu = 'homozygous recessive'\r\nc = a[0] + b[0] + ' ' + a[0] + b[1] + ' ' + b[0] + a[0] + ' ' + b[0] + a[1]\r\nGenotype_Table = {\r\n 'A': {\r\n 'a': h,\r\n 'A': o,\r\n },\r\n 'a': {\r\n 'a': u,\r\n 'A': h,\r\n },\r\n}\r\n\r\noutput = ''\r\nfor i in c.split():\r\n output += Genotype_Table[i[0]][i[1]] + ' '\r\nprint(f'Genes-> {c}')\r\nprint(f\"Genotypes-> {output}\")\r\n\r\nimport time\r\n\r\n\r\ndef FOIL(arg): return arg[0] + arg[2] + ' ' + arg[0] + arg[3] + ' ' + arg[1] + arg[2] + ' ' + arg[1] + arg[3]\r\ndef FOILL(arg): return [arg[0] + arg[2], arg[0] + arg[3], arg[1] + arg[2], arg[1] + arg[3]]\r\nDihybrid1 = 'SSYY'\r\nparent1 = FOIL(Dihybrid1)\r\nprint('parent 1: ' + parent1)\r\nDihybrid2 = 'SSYY'\r\nparent2 = FOIL(Dihybrid2)\r\nprint('parent 2: ' + parent2)\r\n\r\nsuperlongcross = parent1[0] + parent2[0] + parent1[1] + parent2[1] + ' ' + parent1[0] + parent2[3] + parent1[1] + parent2[4] + ' ' + parent1[0] + parent2[6] + parent1[1] + parent2[7] + ' ' + parent1[0] + parent2[9] + parent1[1] + parent2[10] + ' ' + parent1[3] + parent2[0] + parent1[4] + parent2[1] + ' ' + parent1[3] + parent2[3] + parent1[4] + parent2[4] + ' ' + parent1[3] + parent2[6] + parent1[4] + parent2[7] + ' ' + parent1[3] + parent2[9] + parent1[4] + parent2[10] + \\\r\n ' ' + parent1[6] + parent2[0] + parent1[7] + parent2[1] + ' ' + parent1[6] + 
parent2[3] + parent1[7] + parent2[4] + ' ' + parent1[6] + parent2[6] + parent1[7] + parent2[7] + ' ' + parent1[6] + parent2[9] + parent1[7] + parent2[10] + \\\r\n ' ' + parent1[9] + parent2[0] + parent1[10] + parent2[1] + ' ' + parent1[9] + parent2[3] + parent1[10] + parent2[4] + \\\r\n ' ' + parent1[9] + parent2[6] + parent1[10] + parent2[7] + \\\r\n ' ' + parent1[9] + parent2[9] + parent1[10] + parent2[10]\r\nprint(superlongcross)\r\n\r\n#0.0010006427764892578\r\ndef foil(arg): return arg[0] + arg[2] + arg[0] + \\\r\n arg[3] + arg[1] + arg[2] + arg[1] + arg[3]\r\np1 = 'SSYY'\r\np2 = 'SSYY'\r\np3 = foil(p1)\r\np4 = foil(p2)\r\nprint(p3)\r\nprint(p4)\r\n\r\n\r\nsecond = ''\r\np11 = FOILL(p1)\r\np22 = FOILL(p2)\r\nt1 = time.time()\r\nfor i in p11:\r\n for ii in p22:\r\n second += (i[0]+ii[0]+i[1]+ii[1]) + ' '\r\n\r\nprint(second)\r\n\r\nt2 = time.time() - t1\r\n\r\nprint(t2)\r\n","repo_name":"BoredAllWays/SmallProjects","sub_path":"DiHybridCross.py","file_name":"DiHybridCross.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25516878472","text":"import helpers\n\nfrom deepblue_client import DeepBlueClient\n\nclass TestChanges(helpers.TestCase):\n\n def test_change_extra_metadata(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n sample_id = self.sample_ids[0]\n regions_data = \"chr1\\t1\\t100\"\n format = \"\"\n\n # adding two experiments with the same data should work\n res = epidb.add_experiment(\"test_exp1\", \"hg19\", \"Methylation\", sample_id, \"tech1\",\n \"ENCODE\", \"desc1\", regions_data, format, {\"NAME\":\"FELIPE\", \"LAST_NAME\": \"ALBRECHT\"}, self.admin_key)\n self.assertSuccess(res)\n _id = res[1]\n\n res = epidb.change_extra_metadata(_id, \"NAME\", \"JOSE\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.change_extra_metadata(_id, \"LAST_NAME\", \"FERNANDES\", self.admin_key)\n self.assertSuccess(res)\n\n status, info = epidb.info(_id, self.admin_key)\n self.assertSuccess(status, info)\n self.assertEqual({\"NAME\":\"JOSE\", \"LAST_NAME\":\"FERNANDES\"}, info[0][\"extra_metadata\"])\n\n (status, ss) = epidb.search(\"JOSE\", \"\", self.admin_key)\n self.assertEqual(1, len(ss))\n (status, ss) = epidb.search(\"FELIPE\", \"\", self.admin_key)\n self.assertEqual(0, len(ss))\n\n\n res = epidb.change_extra_metadata(sample_id, \"source\", \"ENCODE\", self.admin_key)\n self.assertSuccess(res)\n\n s, info = epidb.info(sample_id, self.admin_key)\n self.assertEqual(info[0][\"source\"], \"ENCODE\")\n\n","repo_name":"MPIIComputationalEpigenetics/DeepBlue","sub_path":"server/tests/test_cases/changes.py","file_name":"changes.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"3769643416","text":"import csv\nimport requests\nfrom bs4 import BeautifulSoup\nfrom wordcloud import WordCloud\nfrom konlpy.tag import Twitter\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\nfrom utils import createDirectory, createFilename, rel2absTime\nfrom tqdm import tqdm\nimport datetime\nimport os\nimport warnings\n\n#소원의돌 스크래핑\ndef wishScraping(year, month):\n dirname = \"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DB/소원의돌/{}/{}\".format(year, month)\n\n now = str(datetime.datetime.now())\n now_month = int(now[5:7])\n now_day = int(now[8:10])\n\n start_day = 1\n for day in range(1,32):\n filename = 
createFilename(\"소원의돌\",year,month,day,\"csv\")\n if not os.path.exists(filename):\n start_day = day - 1\n break\n\n if now_month > month:\n if month == 1 or 3 or 5 or 7 or 8 or 10 or 12:\n days = range(start_day, 32)\n elif month == 2:\n days = range(start_day,29)\n else:\n days = range(start_day,31)\n else:\n days = range(start_day,now_day + 1)\n\n for day in tqdm(days, desc='{}월 소원의돌 수집중'.format(month)):\n #csv 파일 해더 입력\n filename = createFilename(\"소원의돌\",year,month,day,\"csv\")\n createDirectory(dirname)\n f = open(filename, \"w\", encoding=\"utf-8-sig\", newline=\"\")\n writer = csv.writer(f)\n row_title = ['number', 'nickname', 'wish', 'point', 'continuity', 'total']\n writer.writerow(row_title)\n \n #url 주소 입력\n if day < 10:\n if month < 10:\n url = \"https://chimhaha.net/check?date={}-0{}-0{}\".format(year, month, day)\n else:\n url = \"https://chimhaha.net/check?date={}-{}-0{}\".format(year, month, day)\n else:\n if month < 10:\n url = \"https://chimhaha.net/check?date={}-0{}-{}\".format(year, month, day)\n else:\n url = \"https://chimhaha.net/check?date={}-{}-{}\".format(year, month, day)\n\n res = requests.get(url)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n items = soup.find_all(\"div\", attrs={\"class\":\"item\"})\n #하루에 빌어진 소원들 입력\n for item in tqdm(items, desc='{}월 {}일 소원의돌 수집중'.format(month,day)):\n number = item.find(\"div\", attrs={\"class\":\"number\"}).get_text()[:-1]\n nickname = item.find(\"div\", attrs={\"class\":\"nickName\"}).get_text()\n wish = item.find(\"div\", attrs={\"class\":\"comment\"}).get_text()\n point = item.find(\"div\", attrs={\"class\":\"point\"}).get_text()[:-1]\n continuity = item.find(\"div\", attrs={\"class\":\"continue\"}).get_text()[:-2]\n total = item.find(\"div\", attrs={\"class\":\"total\"}).get_text()[1:-1] \n data = [number, nickname, wish, point, continuity, total]\n writer.writerow(data)\n\n#소원의돌 일자별 데이터 월별로 병합\ndef wishConcat(year, month):\n if month == 1 or 3 or 5 or 7 or 8 or 10 or 12:\n days = range(1, 32)\n elif month == 2:\n days = range(1,29)\n else:\n days = range(1,31)\n\n df_all = pd.DataFrame()\n\n for day in tqdm(days, desc='{}월 소원의돌 병합중'.format(month)):\n filename = createFilename(\"소원의돌\",year,month,day,\"csv\")\n if not os.path.exists(filename):\n break\n df_wish = pd.read_csv(filename)\n df_wish['date'] = \"{}.{}.{}\".format(year, month, day)\n df_all = pd.concat([df_all, df_wish])\n\n if month < 10:\n str_month = \"0\"+ str(month)\n else:\n str_month =str(month)\n df_all.to_csv(\"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DB/소원의돌/{}/{}/{}{}_소원의돌.csv\".format(year, month, year, str_month), mode='w',index=False)\n\n#열혈 접속자 기도를 해당월 마지막일 기준으로 스크래핑\ndef wishLoyal(year, month):\n if month < 10:\n str_month = \"0\"+ str(month)\n else:\n str_month =str(month)\n df_wish = pd.read_csv(\"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DB/소원의돌/{}/{}/{}{}_소원의돌.csv\".format(year, month, year, str_month))\n nicknames = df_wish['nickname'].unique()\n # 연속 기도, 누적 기도 수 입력\n for nickname in tqdm(nicknames, desc='유저 정보 생성 중'):\n df_single_user = df_wish[df_wish['nickname'] == nickname].sort_values(by='date')\n dates = df_single_user['date'].unique()\n for date in dates:\n day = int(date.split('.')[2])\n if date == dates[0]:\n continuity = 1\n total = 1\n else:\n total += 1\n if \"{}.{}.{}\".format(year, month, day-1) in dates:\n continuity += 1\n else:\n continuity = 1\n df_wish.loc[(df_wish['nickname'] == nickname) & (df_wish['date'] == date), 'continuity'] = continuity\n df_wish.loc[(df_wish['nickname'] == nickname) & 
(df_wish['date'] == date), 'total'] = total\n # 유저 정보 생성\n df_user = pd.DataFrame()\n df_user['nickname'] = nicknames\n for nickname in tqdm(nicknames, '연속/누적 기도 계산 중'):\n df_user.loc[df_user['nickname'] == nickname, 'total'] = np.max(df_wish[df_wish['nickname'] == nickname]['total'])\n df_user.loc[df_user['nickname'] == nickname, 'continuity'] = np.max(df_wish[df_wish['nickname'] == nickname]['continuity'])\n dirname = 'C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DA/소원의돌/{}/{}'.format(year, month)\n createDirectory(dirname)\n df_user.to_csv(\"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DA/소원의돌/{}/{}/{}{}_소원의돌_user.csv\".format(year,month,year,month), mode='w', index=False)\n\n #열혈 유저 정보 생성\n if month in (1, 3, 5, 7, 8, 10, 12):\n full_month = 31\n elif month == 2:\n full_month = 28\n else:\n full_month = 30\n df_loyal_users = df_user[df_user['total'] == full_month]\n nicknames = df_loyal_users['nickname']\n for nickname in nicknames:\n df_loyal_users.loc[df_loyal_users['nickname'] == nickname, 'order'] = np.mean(df_wish[df_wish['nickname'] == nickname]['number'])\n if month in (1, 3, 5, 7, 8, 10, 12):\n url = \"https://chimhaha.net/check?date={}-{}-31\".format(year, str_month)\n elif month == 2:\n url = \"https://chimhaha.net/check?date={}-{}-28\".format(year, str_month)\n else:\n url = \"https://chimhaha.net/check?date={}-{}-30\".format(year, str_month)\n res = requests.get(url)\n\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n items = soup.find_all(\"div\", attrs={\"class\":\"item\"})\n for item in tqdm(items, '열혈 유저 정보 수집 중'):\n number = item.find(\"div\", attrs={\"class\":\"number\"}).get_text()[:-1]\n nickname = item.find(\"div\", attrs={\"class\":\"nickName\"}).get_text().strip()\n wish = item.find(\"div\", attrs={\"class\":\"comment\"}).get_text()\n point = item.find(\"div\", attrs={\"class\":\"point\"}).get_text()[:-1]\n continuity = item.find(\"div\", attrs={\"class\":\"continue\"}).get_text()[:-2]\n total = item.find(\"div\", attrs={\"class\":\"total\"}).get_text()[1:-1] \n \n if(df_loyal_users['nickname'].isin([nickname]).any()):\n df_loyal_users.loc[df_loyal_users['nickname'] == nickname, 'wish'] = wish\n\n df_loyal_users.to_csv(\"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DA/소원의돌/{}/{}/{}{}_소원의돌_loyaluser.csv\".format(year,month,year,month), mode='w', index=False)\n\n#해당월 소원의 돌 워드클라우드 일자별, 월별로 생성\ndef wishCloud(year, month):\n warnings.filterwarnings('ignore')\n if month in (1, 3, 5, 7, 8, 10, 12):\n days = range(1, 32)\n elif month == 2:\n days = range(1,29)\n else:\n days = range(1,31)\n text_month=''\n\n for day in tqdm(days, desc='{}월 소원의돌 wordcloud 생성중'.format(month)):\n filename = createFilename(\"소원의돌\",year,month,day,\"csv\")\n if not os.path.exists(filename):\n break\n wishes = pd.read_csv(filename)['wish']\n text =\"\"\n for wish in wishes:\n text = text + str(wish)\n \n text_month = text_month + text\n\n twitter = Twitter()\n\n # twitter함수를 통해 읽어들인 내용의 형태소를 분석한다.\n sentences_tag = []\n sentences_tag = twitter.pos(text) \n\n noun_adj_list = []\n\n\n # tag가 명사이거나 형용사인 단어들만 noun_adj_list에 넣어준다.\n for word, tag in sentences_tag:\n if tag in ['Noun' , 'Adjective']: \n noun_adj_list.append(word)\n\n\n # 가장 많이 나온 단어부터 40개를 저장한다.\n counts = Counter(noun_adj_list)\n tags = counts.most_common(40) \n\n\n # WordCloud를 생성한다.\n # 한글을 분석하기위해 font를 한글로 지정해주어야 된다. macOS는 .otf , window는 .ttf 파일의 위치를\n # 지정해준다. (ex. 
'/Font/GodoM.otf')\n wc = WordCloud(font_path='C:/Windows/Fonts/맑은 고딕/malgunbd.ttf',background_color=\"white\", max_font_size=60)\n cloud = wc.generate_from_frequencies(dict(tags))\n\n\n # 생성된 WordCloud를 test.jpg로 보낸다.\n cloud.to_file(filename[:-4]+\"_cloud.jpg\")\n\n\n\n\n # 월단위 cloud 작성\n twitter = Twitter()\n\n # twitter함수를 통해 읽어들인 내용의 형태소를 분석한다.\n sentences_tag = []\n sentences_tag = twitter.pos(text_month) \n\n noun_adj_list = []\n\n\n # tag가 명사이거나 형용사인 단어들만 noun_adj_list에 넣어준다.\n for word, tag in sentences_tag:\n if tag in ['Noun' , 'Adjective']: \n noun_adj_list.append(word)\n\n\n # 가장 많이 나온 단어부터 40개를 저장한다.\n counts = Counter(noun_adj_list)\n tags = counts.most_common(40) \n wc = WordCloud(font_path='C:/Windows/Fonts/맑은 고딕/malgunbd.ttf',background_color=\"white\", max_font_size=60)\n cloud = wc.generate_from_frequencies(dict(tags))\n cloud.to_file(\"C:/Users/KimJihong/Desktop/김지홍/개발/침하하/DB/소원의돌/{}/{}/{}{}_소원의돌_cloud.jpg\".format(year, month, year, month))\n print(\"finish!\".format(day))","repo_name":"Jihong-Kim97/chimhaha","sub_path":"wish.py","file_name":"wish.py","file_ext":"py","file_size_in_byte":10381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13760583177","text":"\nfrom epyk.core.js import JsUtils\nfrom epyk.core.js.primitives import JsObjects\n\n\nclass SocketIO(object):\n\n def __init__(self, htmlCode=None, src=None):\n \"\"\"\n Description:\n ------------\n \"\"\"\n if src is not None:\n src.jsImports.add('socket.io')\n self._src = src\n self._selector = htmlCode or \"socket_%s\" % id(self)\n\n @property\n def message(self):\n \"\"\"\n\n \"\"\"\n return JsObjects.JsObject.JsObject.get(\"data\")\n\n def send(self, msg):\n \"\"\"\n Description:\n ------------\n This will send an event called message(built in) to our client, four seconds after the client connects.\n The send function on socket object associates the 'message' event.\n\n Usage::\n\n https://www.tutorialspoint.com/socket.io/socket.io_event_handling.htm\n\n Attributes:\n ----------\n :param msg:\n \"\"\"\n msg = JsUtils.jsConvertData(msg, None)\n return JsObjects.JsVoid(\"%s.send(%s)\" % (self._selector, msg))\n\n def join(self, roomId):\n \"\"\"\n Description:\n ------------\n\n Usage::\n\n https://www.tutorialspoint.com/socket.io/socket.io_rooms.htm\n\n Attributes:\n ----------\n :param roomId:\n \"\"\"\n roomId = JsUtils.jsConvertData(roomId, None)\n return JsObjects.JsVoid(\"%s.join(%s)\" % (self._selector, roomId))\n\n def inRoom(self, roomId, eventType, jsData=None):\n \"\"\"\n Description:\n ------------\n\n Usage::\n\n https://www.tutorialspoint.com/socket.io/socket.io_rooms.htm\n\n Attributes:\n ----------\n :param roomId: String. The room identifier\n \"\"\"\n jsData = JsUtils.jsConvertData(jsData or {}, None)\n eventType = JsUtils.jsConvertData(eventType, None)\n roomId = JsUtils.jsConvertData(roomId, None)\n return JsObjects.JsVoid(\"%s.in(%s).emit(%s, %s)\" % (self._selector, roomId, eventType, jsData))\n\n def leave(self, roomId):\n \"\"\"\n Description:\n ------------\n\n Related Pages:\n\n https://www.tutorialspoint.com/socket.io/socket.io_rooms.htm\n\n Attributes:\n ----------\n :param roomId: String. 
The room identifier\n \"\"\"\n roomId = JsUtils.jsConvertData(roomId, None)\n return JsObjects.JsVoid(\"%s.leave(%s)\" % (self._selector, roomId))\n\n def connect(self, url=None, port=None, namespace=None, from_config=None):\n \"\"\"\n Description:\n ------------\n This function will automatically add the socket to the page object.\n This must be defined first in order to be used in the various components\n\n Attributes:\n ----------\n :param url: String. The server url\n :param port: Integer. The server port\n :param namespace: String. Optional. The server namespace (or room)\n :param from_config: Python Object. An internal Server configuration object (page.js.server())\n \"\"\"\n if from_config is not None:\n self._src._props['js']['builders'].add(\"var %s = io.connect(%s)\" % (self._selector, from_config.address))\n return JsObjects.JsVoid(\"var %s = io.connect(%s)\" % (self._selector, from_config.address))\n\n elif url is None:\n self._src._props['js']['builders'].add(\"var %s = io.connect()\" % self._selector)\n return JsObjects.JsVoid(\"var %s = io.connect()\" % self._selector)\n\n if namespace is None:\n self._src._props['js']['builders'].add(\"var %s = io.connect('%s:%s')\" % (self._selector, url, port))\n return JsObjects.JsVoid(\"var %s = io.connect('%s:%s')\" % (self._selector, url, port))\n\n self._src._props['js']['builders'].add(\"var %s = io.connect('%s:%s/%s')\" % (self._selector, url, port, namespace))\n return JsObjects.JsVoid(\"var %s = io.connect('%s:%s/%s')\" % (self._selector, url, port, namespace))\n\n def on(self, eventType, jsFncs, profile=False):\n \"\"\"\n Description:\n ------------\n\n Usage::\n\n https://www.tutorialspoint.com/socket.io/socket.io_event_handling.htm\n\n Attributes:\n ----------\n :param eventType:\n :param jsFncs:\n :param profile:\n\n :return: self to allow the chaining\n \"\"\"\n if not isinstance(jsFncs, list):\n jsFncs = [jsFncs]\n eventType = JsUtils.jsConvertData(eventType, None)\n self._src.js.onReady(\"%s.on(%s, function(data) {%s})\" % (\n self._selector, eventType, JsUtils.jsConvertFncs(jsFncs, toStr=True, profile=profile)))\n return self\n\n def emit(self, eventType, jsData=None):\n \"\"\"\n Description:\n ------------\n\n Attributes:\n ----------\n :param eventType:\n :param jsData:\n \"\"\"\n jsData = JsUtils.jsConvertData(jsData or {}, None)\n eventType = JsUtils.jsConvertData(eventType, None)\n return JsObjects.JsVoid(\"%s.emit(%s, %s)\" % (self._selector, eventType, jsData))\n","repo_name":"TrendingTechnology/epyk-ui","sub_path":"epyk/core/js/packages/JsSocketIO.py","file_name":"JsSocketIO.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"24590437030","text":"import json\nfrom http.server import BaseHTTPRequestHandler, ThreadingHTTPServer\nfrom http.client import parse_headers\nfrom threading import Lock\n\nimport requests\n\n\nclass ThreadSafeIncrementer:\n def __init__(self, num_backends):\n self.value = 0\n self.num_backends = num_backends\n self._lock = Lock()\n\n def nextindex(self):\n with self._lock:\n self.value = (self.value + 1) % self.num_backends\n return self.value\n\n\nglobal_current_backend = 0\n\n\nclass RequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n\n is_request_healthy = False\n num_backends_tried = 0\n\n while not is_request_healthy and num_backends_tried < NUM_BACKENDS:\n\n # We pick a backend here\n idx = global_current_backend.nextindex()\n\n url = f\"http://{backends[idx]}/{self.path}\"\n 
req_header = self.parse_headers()\n\n try:\n resp = requests.get(url, headers=req_header, verify=False, timeout=3)\n except requests.ConnectTimeout or requests.exceptions.ReadTimeout as e:\n self.send_response(504)\n self.wfile.write(\"Upstream timed out\".encode(\"utf-8\"))\n return\n\n print(\"UPSTREAM STATUS:\", resp.status_code)\n is_request_healthy = True\n if resp.status_code // 100 == 5:\n is_request_healthy = False\n num_backends_tried += 1\n\n if num_backends_tried >= NUM_BACKENDS:\n self.send_response(500)\n self.send_resp_headers(resp)\n self.wfile.write(\"No backend available\".encode(\"utf-8\"))\n else:\n self.send_response(resp.status_code)\n self.send_resp_headers(resp)\n self.wfile.write(resp.content)\n\n return\n\n def parse_headers(self):\n req_header = {}\n for line in self.headers:\n line_parts = [o.strip() for o in line.split(\":\", 1)]\n if len(line_parts) == 2:\n req_header[line_parts[0]] = line_parts[1]\n return req_header\n\n def send_resp_headers(self, resp):\n respheaders = resp.headers\n for header_name in respheaders:\n if header_name not in [\n \"Content-Encoding\",\n \"Transfer-Encoding\",\n \"content-encoding\",\n \"transfer-encoding\",\n \"content-length\",\n \"Content-Length\",\n \"Connection\",\n ]:\n self.send_header(header_name, respheaders[header_name])\n self.send_header(\"Content-Length\", len(resp.content))\n self.end_headers()\n\n\ndef run():\n LISTEN_ADDR = \"0.0.0.0\"\n LISTEN_PORT = 3000\n print(\"Starting HTTP Listener\")\n server_address = (LISTEN_ADDR, LISTEN_PORT)\n httpd = ThreadingHTTPServer(server_address, RequestHandler)\n try:\n print(f\"Listening for connections at http://{LISTEN_ADDR}:{LISTEN_PORT}/\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.socket.close()\n\n\nNUM_BACKENDS = 0\n\n\nif __name__ == \"__main__\":\n with open(\"backend.json\") as f:\n backends = json.load(f)[\"backends\"]\n NUM_BACKENDS = len(backends)\n global_current_backend = ThreadSafeIncrementer(NUM_BACKENDS)\n run()\n","repo_name":"ameyanrd/http-loadbalancer","sub_path":"loadbalancer.py","file_name":"loadbalancer.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74548712781","text":"#Jacob Pawlak\n#May 30th, 2017\n#A Real Challenge\n#https://open.kattis.com/problems/areal\n\nfrom math import *\n\ndef main():\n\n\t#get the input (it will be a square area of a pasture)\n\tpasture = float(input())\n\troot_of_pasture = pow(pasture, .5)\n\t#the total length of the fence is 4 * the side length ^^\n\tprint(4 * root_of_pasture)\n\n\nmain()\n","repo_name":"JacobPawlak/KattisSolutions","sub_path":"arealchallenge.py","file_name":"arealchallenge.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"38830659561","text":"# By dark cobra for Dark cobra with logger support\n# Kang with credits..\n\nimport asyncio\nfrom asyncio import wait\nfrom userbot import CMD_HELP\n\n\nfrom userbot.events import register\n\n@register(outgoing=True, pattern=\"^.tspam\")\nasync def tmeme(e):\n tspam = str(e.text[7:])\n message = tspam.replace(\" \", \"\")\n for letter in message:\n await e.respond(letter)\n await e.delete()\n\n@register(outgoing=True, pattern=\"^.spam\")\nasync def spammer(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n message = e.text\n counter = int(message[6:8])\n spam_message = str(e.text[8:])\n await 
asyncio.wait([e.respond(spam_message) for i in range(counter)])\n await e.delete()\n if LOGGER:\n await e.client.send_message(\n LOGGER_GROUP,\n \"#SPAM \\n\\n\"\n \"Spam was executed successfully\"\n )\n \n@register(outgoing=True, pattern=\"^.bigspam\")\nasync def bigspam(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n message = e.text\n counter = int(message[9:13])\n spam_message = str(e.text[13:])\n for i in range(1, counter):\n await e.respond(spam_message)\n await e.delete()\n if LOGGER:\n await e.client.send_message(\n LOGGER_GROUP,\n \"#BIGSPAM \\n\\n\"\n \"Bigspam was executed successfully\"\n )\n \n \n@register(outgoing=True, pattern=\"^.pspam\")\nasync def tiny_pic_spam(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n message = e.text\n text = message.split()\n counter = int(text[1])\n link = str(text[2])\n for i in range(1, counter):\n await e.client.send_file(e.chat_id, link)\n await e.delete()\n if LOGGER:\n await e.client.send_message(\n LOGGER_GROUP,\n \"#PICSPAM \\n\\n\"\n \"PicSpam was executed successfully\"\n )\n@register(outgoing=True, pattern=\"^.delayspam (.*)\")\nasync def spammer(e):\n spamDelay = float(e.pattern_match.group(1).split(' ', 2)[0])\n counter = int(e.pattern_match.group(1).split(' ', 2)[1])\n spam_message = str(e.pattern_match.group(1).split(' ', 2)[2])\n await e.delete()\n for i in range(1, counter):\n await e.respond(spam_message)\n await asyncio.sleep(spamDelay)\n if LOGGER:\n await e.client.send_message(\n LOGGER_GROUP, \"#DelaySPAM\\n\"\n \"DelaySpam was executed successfully\")\n \n\nCMD_HELP.update(\n {\n \"spam\": \".spam \"\n \"\\nUsage: spams the current chat, the current limit for this is from 1 to 99.\\n\\n\"\n \".bigspam \"\n \"\\nUsage: Spams the current chat, the current limit is above 100.\\n\\n\"\n \".pspam \"\n \"\\nUsage: Spams the current chat with number you pics you did put in .\\n\\n\"\n \".delayspam \"\n \"\\nUsage: Spams the current chat with with the input msgs with a delay time that has been given as its input.\\n\\n\"\n }\n)\n","repo_name":"pro-boy/Marshmello","sub_path":"userbot/plugins/spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"3341201812","text":"import keypad\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\n\nkm = keypad.KeyMatrix(\n # NOTE!!! Double check your pinout. 
These are for SparkFun Thing Plus RP2040\n row_pins=(board.D18, board.D17, board.D16),\n column_pins=(board.D19, board.D20, board.D21, board.D22),\n)\n\nswitch = DigitalInOut(board.SDA) \nswitch.direction = Direction.INPUT\nswitch.pull = Pull.UP\n\n# too lazy to figure out which row/column is which, so I just re-mapped all the numbers\nkey_map = [5, 2, 11, 8, 3, 0, 9, 6, 4, 1, 10, 7]\n\nclass Buttons():\n def __init__(self):\n self._pressed_keys = []\n\n def poll_keys(self):\n event = km.events.get()\n if event:\n if event.pressed:\n self._pressed_keys.insert(0, key_map[event.key_number])\n else:\n self._pressed_keys.remove(key_map[event.key_number])\n\n @property\n def pressed_key(self):\n self.poll_keys()\n return self._pressed_keys[0] if self._pressed_keys else None\n\n @property\n def phone_off_hook(self):\n return not switch.value\n\n \n ","repo_name":"dupontgu/strange-phone-circuitpython","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"39136275927","text":"from typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n num_to_idx = {}\n \n # 키와 값을 바꿔서 딕셔너리로 저장\n for i, num in enumerate(nums):\n num_to_idx[num] = i\n\n # 타겟에서 첫 번째 수를 뺀 결과를 키�� 조회\n for i, num in enumerate(nums):\n if target - num in num_to_idx and i != num_to_idx[target - num]:\n return [i, num_to_idx[target - num]]\n\n\n### Time Complexity\n# for문은 1중첩 뿐이고, dict(해시테이블)의 조회는 평균 O(1) 이므로 전체는 O(n).\n\n### Note\n# 속도는 dict가 제일 빠르다는 것을 기억하자.\n","repo_name":"yg-moon/problem-solving","sub_path":"python-algorithm-interview/my-solutions/3-linear-data-structures/ch07/7-3.py","file_name":"7-3.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"15265369106","text":"# -*- coding:utf-8 -*-\n# coding=\n\n# from django.conf.urls.defaults import patterns, include, url\nfrom django.conf.urls import url, include\n# from django.conf.urls import patterns, url, include\n# from django.conf.urls.defaults import *\n\nimport todoes.views\nimport todoes.api\nimport assets.views\nimport save_state.api\nimport snmp.api\nimport assets.test_view\nimport assets.api\nimport logs.views\nimport user_settings.views\nimport djlib\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout, \\\n password_change, password_change_done\nfrom django.conf import settings\n\nadmin.autodiscover()\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\n# example from tutorial for 1.10\n# urlpatterns = [\n# url('^$', views.myview),\n# url('^other/$', views.otherview),\n# ]\n# urlpatterns = patterns('',\nurlpatterns = [\n # просмотр задач\n url(r'^tasks/$', todoes.views.tasks),\n # просмотр всех задач по страницам\n url(r'^all_task/([^/]+)/$', todoes.views.all_tasks),\n # просмотр сообщения\n url(r'^messages/show/(\\d+)/$',\n todoes.views.messages_show_message),\n\n # обычные задачи\n url(r'^new_ticket/$', todoes.views.new_ticket),\n url(r'^edit/([^/]+)/$', todoes.views.edit_task),\n # подвтерждение срока переноса задачи\n url(r'^task_accept_request_due_date/(\\d+)/$',\n todoes.views.accept_request_due_date),\n # отклонение срока переноса задачи\n url(r'^task_reject_request_due_date/(\\d+)/$',\n todoes.views.reject_request_due_date),\n # закрытие / отмена закрытия заявки\n 
url(r'^close/([^/]+)/$', todoes.views.close_task),\n url(r'^unclose/([^/]+)/$', todoes.views.unclose_task),\n # подтверждение выполнения задачи\n url(r'^confirm/([^/]+)/$', todoes.views.confirm_task),\n # повторяющиеся задачи\n # создание повторяющейся задачи\n url(r'^new_regular_ticket/$', todoes.views.new_regular_ticket),\n # редактирование повторяющейся задачи\n url(r'^edit_regular/([^/]+)/$', todoes.views.edit_regular_task),\n # отметка как сделанная повторяющейся задачи\n url(r'^regular_task_done/([^/]+)/$',\n todoes.views.regular_task_done),\n # общее для всех задач\n url(r'^task/([^/]+)/(\\d+)/$', todoes.views.task),\n # установка напоминалки повторяющейся задачи\n # удаление повторяющейся задачи\n url(r'^deleted_tasks/$', todoes.views.deleted_tasks),\n url(r'^delete/([^/]+)/(\\d+)/$', todoes.views.delete_task),\n url(r'^completle_delete/([^/]+)/(\\d+)/$',\n todoes.views.completle_delete_task),\n url(r'^undelete/([^/]+)/(\\d+)/$', todoes.views.undelete_task),\n url(r'^add_children_task/([^/]+)/(\\d+)/$',\n todoes.views.add_children_task),\n # http://192.168.1.157:8080/move_to_call/47\n # изменение категории на \"Звонки\"\n url(r'^move_to_call/([^/]+)/(\\d+)/$', todoes.views.move_to_call),\n # http://192.168.1.157:8080/set_reminder/47\n # установка напоминания для задачи\n url(r'^set_reminder/([^/]+)/(\\d+)/$', todoes.views.set_reminder),\n # Для администратора:\n url(r'^users/$', todoes.views.get_all_logged_in_users),\n url(r'^users/activity_history/([^/]+)/([^/]*)/$',\n todoes.views.get_user_activity_history),\n url(r'^tasks/to/([^/]+)/$', todoes.views.to),\n # добавление сообщения\n url(r'^messages/add/$', todoes.views.messages_add),\n # API для задач\n # Получение человеческого представления hardcore-style при\n # создании регулярной задачи\n url(r'^api/crontab_to_russian/([^/]+)/$',\n todoes.api.crontab_to_human),\n\n url(r'^accounts/$', login),\n url(r'^login/$', login),\n url(r'^accounts/login/$', login),\n url(r'^test/password2/$', password_change),\n url(r'^password_change_done/$', password_change_done),\n url(r'^accounts/register/$', todoes.views.register),\n url(r'^accounts/logout/$', logout),\n url(r'^accounts/profile/$', todoes.views.profile),\n # Uncomment the admin/doc line below to enable admin documentation\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', todoes.views.tasks),\n\n # изменение языка интерфейса\n url(r'^language/([^/]+)/$',\n djlib.multilanguage_utils.change_language),\n\n # Работа с активами\n # Добавление чека, где указывается плата + сколько там чего\n url(r'^bill/cash/add/$', assets.views.bill_cash_add),\n # Добавление счёта, где указывается плата + сколько там чего\n url(r'^bill/cashless/add/$', assets.views.bill_cashless_add),\n # Просмотр списка счетов, как по налу так и по безналу с фильтрами\n url(r'^all_bills/$', assets.views.all_bills),\n # Просмотр конкретного чека/счёта - тип,id\n url(r'^bill/show/([^/]+)/(\\d*)/$', assets.views.show_bill),\n # Список всех удалённых чеков/счётов\n url(r'^all_deleted_bills/$', assets.views.all_deleted_bills),\n # Просмотр активов по категориям\n url(r'^assets_by_type/(\\d*)/$', assets.views.assets_by_type),\n # Просмотр актива\n url(r'^asset/(\\d*)/$', assets.views.asset_view),\n\n # API для работы с активами\n # Выдача формы добавления актива, в качестве параметра\n # категория актива, префикс к имени полей формы (число)\n url(r'^api/get_asset_add_form/(\\d+)/(\\d*)/$',\n 
assets.api.get_asset_add_form),\n # Выдача заголовка для формы добавления актива\n url(r'^api/get_asset_add_form_header/$',\n assets.api.get_asset_add_form_header),\n # Выдача скрипта для формы добавления актива\n url(r'^api/get_asset_add_form_script/(\\d+)/(\\d*)/$',\n assets.api.get_asset_add_form_script),\n # Выдача списка поставщиков, в качестве параметра -\n # тот поставщик, который должен быть указан, name\n url(r'^api/get_contractors_list/([^/]*)/$',\n assets.api.get_contractors_list),\n # Выдача формы добавления поставщика, в качестве параметра -\n # название\n url(r'^api/get_new_contractor_add_form/([^/]*)/$',\n assets.api.get_new_contractor_add_form),\n # Сохраняем нового поставщика\n url(r'^api/save_new_contractor/$',\n assets.api.save_new_contractor),\n # Получаем список типов активов, в качестве парамета -\n # id выбранного\n url(r'^api/get_asset_type_list/(\\d*)/$',\n assets.api.get_asset_type_list),\n # Пометить конкретный чек/счёт к удалению - тип,id\n url(r'^api/bill/delete/([^/]+)/(\\d*)/$',\n assets.api.mark_as_deleted_bill),\n url(r'^bill/delete/([^/]+)/(\\d*)/$',\n assets.api.mark_as_deleted_bill),\n # Удалить конкретный чек/счёт - тип,id\n url(r'^api/bill/full_delete/([^/]+)/(\\d*)/$',\n assets.api.full_delete_bill),\n url(r'^bill/full_delete/([^/]+)/(\\d*)/$',\n assets.api.full_delete_bill),\n # Получение списка активов по категориям\n url(r'^api/assets_by_type/(\\d+)/$', assets.api.assets_by_type),\n # Удаление актива - id актива, id категории к которой вернуться\n # при ошибки\n url(r'^api/asset/delete/(\\d+)/(\\d+)/$', assets.api.asset_delete),\n # Редактирование актива - id актива\n url(r'^api/asset/edit/(\\d+)/$', assets.api.asset_edit),\n # получение json списка моделей для типа активов- тип актива\n url(r'^api/asset_types/models/get/(\\d+)/$',\n assets.api.get_models_list_json),\n # Получение формы для добавления нового типа актива\n url(r'^api/asset_types/type/add/$',\n assets.api.get_new_asset_type_add_form),\n # Сохраняем новый тип актива\n url(r'^api/asset_types/type/save/$',\n assets.api.get_new_asset_type_save),\n # Редактирование актива - id актива\n url(r'^api/asset/save_edited/(\\d+)/$',\n assets.api.asset_save_edited),\n # Получаем форму для добавления актива - id типа актива,\n # имя модели\n url(r'^api/get_new_model_add_form/(\\d+)/(.+)/$',\n assets.api.get_new_asset_model_add_form),\n # Сохраняем новую модель актива- id типа актива\n url(r'^api/asset_types/model/save/(\\d+)/$',\n assets.api.save_new_model),\n # Меняем пройденные этапы для счёта по безналу - номер счёта,\n # название этапа, включить/выключить (провести/отменить\n # проведение), послать таблицу или перенаправить страницу?\n url(r'^api/bill/cashless/edit/stages/(\\d+)/([^/]+)/(\\d+)/(\\d+)$',\n assets.api.cashless_edit_stages),\n # API для выдачи JSON\n # Список моделей актива для типа актива - id типа актива\n url(r'^api/json/get/models/(\\d+)/$', assets.api.json_models),\n # Получение цены и срока гарантии для последнего купленного\n # актива этой модели этой фирмы. 
Данные передаются через\n # POST запрос\n url(r'^api/json/get/price_and_warranty/$',\n assets.api.json_price_and_warranty),\n # Логирование и т.п.\n url(r'^show_last_logs/(\\d*)/$', logs.views.show_last_logs),\n # API для сохранения статусов\n # сохранение статуса через http\n url(\n r'^api/state/save_by_http/'\n r'([^/]+)/([^/]+)/([^/]+)/(\\d+)/([^/]*)/(\\d+)/([^/]+)/$',\n save_state.api.save_by_http),\n # просмотр статусов\n url(r'^api/state/show_states/([^/]+)/$',\n save_state.api.show_states),\n # API snmp\n # просмотр карты сети\n url(r'^api/snmp/show_network_map/$',\n snmp.api.show_network_map),\n # просмотр карты роутера по community string & ip\n url(r'^api/snmp/show_router_mapping/([^/]+)/(\\d+.\\d+.\\d+.\\d+)/$',\n snmp.api.show_router_mapping),\n # просмотр карты по номеру роутера в базе\n url(r'^api/snmp/show_router_mapping_by_id/(\\d+)/$',\n snmp.api.show_router_mapping_by_id),\n # определение производителя по маку\n url(\n r'^api/snmp/brand_by_mac/'\n r'([0-9a-fA-F]{6})/$',\n snmp.api.brand_by_mac),\n # определение имени по ip\n url(r'^api/snmp/name_by_ip/(\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3})/$',\n snmp.api.name_by_ip),\n # определение имени по ip\n url(\n r'^api/snmp/find_by_mac/'\n r'([0-9a-fA-F]{2}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}:'\n r'[0-9a-fA-F]{2}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2})/$',\n snmp.api.find_by_mac),\n\n # Настройки\n url(r'^settings/$', user_settings.views.show_settings),\n # Показ настроек для пользователя\n url(r'^settings/user/([^/]*)/$',\n user_settings.views.show_user_settings),\n # Сохранить настройку после редактирования\n url(r'^api/setting/save/([^/]+)/([^/]+)/$',\n user_settings.views.save_edited_setting),\n # Выдать форму для редактирования настроек, берущихся из БД\n url(r'^api/setting/edit_from_bd/([^/]+)/([^/]+)/$',\n user_settings.views.edit_from_bd),\n # Сохранить настройку из БД после редактирования\n url(r'^api/setting/save_from_bd/([^/]+)/([^/]+)/$',\n user_settings.views.save_from_bd),\n # Модули\n # Включить модуль\n url(r'^api/setting/run/([^/]+)/$',\n user_settings.views.run_module),\n # Выключить модуль\n url(r'^api/setting/stop/([^/]+)/$',\n user_settings.views.stop_module),\n\n # Тестированание\n # url(r'^test/bill/add/$', assets.test_view.bill_add),\n url(r'^test/test_cm/$', assets.test_view.test_cm),\n # url(r'^test/password/$', assets.test_view.password),\n # (r'^change-password/$',\n # 'django.contrib.auth.views.password_change'),\n # (r'^password-changed/$',\n # 'django.contrib.auth.views.password_change_done'),\n url(r'^test/cashless_maintenance/$',\n assets.test_view.cashless_maintenance),\n\n # (r'^i18n/', include('django.conf.urls.i18n')),\n]\n# )\nif settings.DEBUG:\n from django.views.static import serve\n # urlpatterns += patterns('',\n urlpatterns.append(\n # url(r'^media/(?P.*)$', 'django.views.static.serve',\n url(r'^media/(?P.*)$', serve,\n {'document_root': settings.MEDIA_ROOT})\n )\n # )\n","repo_name":"Ishayahu/MJCC-tasks","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"18663888209","text":"n, x = list(map(int, input().split()))\r\ns = []\r\nfor i in range(x):\r\n s.append(list(map(float, input().split())))\r\n\r\na = list(zip(*s))\r\nfor j in range(n):\r\n t = 0\r\n for k in range(len(a[j])):\r\n t += a[j][k]\r\n print(round(t/x, 
1))\r\n","repo_name":"dlsnoopy95052/test1","sub_path":"test65.py","file_name":"test65.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28969068388","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom enum import Enum\nfrom datetime import datetime, timedelta\nfrom fastapi import HTTPException\nfrom fastapi.responses import FileResponse\n\n\ndef verify_hydrodata_measured(station_id, parameter, start_date, end_date):\n return True\n\n\ndef get_hydrodata_measured(filesystem, station_id, parameter, start_date, end_date):\n station_dir = os.path.join(filesystem, \"media/bafu/hydrodata/CSV\", str(station_id))\n if not os.path.exists(station_dir):\n raise HTTPException(status_code=400,\n detail=\"No data available for station id: {}\".format(station_id))\n if not os.path.exists(os.path.join(station_dir, parameter)):\n raise HTTPException(status_code=400,\n detail='Parameter \"{}\" not available for station {}, please select from: {}'.format(parameter, station_id, \", \".join(os.listdir(station_dir))))\n folder = os.path.join(station_dir, parameter)\n start_date = datetime.strptime(start_date, '%Y%m%d')\n end_date = datetime.strptime(end_date, '%Y%m%d')\n files = [os.path.join(folder, \"BAFU_{}_{}_{}.csv\".format(station_id, parameter, (start_date+timedelta(days=x)).strftime(\"%Y-%m-%d\")))\n for x in range((end_date-start_date).days + 1)]\n bad_files = []\n for file in files:\n if not os.path.isfile(file):\n bad_files.append(file.split(\"/\")[-1].split(\".\")[0][-10:])\n if len(bad_files) > 0:\n raise HTTPException(status_code=400,\n detail=\"Data not available for station {} ({}) for the following dates: {}\".format(station_id, parameter, \", \".join(bad_files)))\n\n df = pd.concat(map(pd.read_csv, files), ignore_index=True)\n return {\"Time\": list(df[\"Time\"]), parameter: list(df[\"BAFU_{}_{}\".format(station_id, parameter)])}\n\n\nclass HydrodataPredicted(str, Enum):\n official = \"official\"\n unofficial = \"unofficial\"\n\n\ndef verify_hydrodata_predicted(status, station_id, parameter):\n return True\n\n\ndef get_hydrodata_predicted(filesystem, status, station_id, model):\n file = os.path.join(filesystem, \"media/bafu/hydrodata\", \"pqprevi-\" + status, \"Pqprevi_{}_{}.txt\".format(model, station_id))\n if not os.path.exists(file):\n raise HTTPException(status_code=400,\n detail=\"Prediction not available for model {} at station {}.\".format(model, station_id))\n return FileResponse(file)\n\n\ndef metadata_hydrodata_total_lake_inflow(filesystem):\n output = []\n folder = os.path.join(filesystem, \"media/bafu/hydrodata/TotalInflowLakes\")\n lakes = os.listdir(folder)\n for lake in lakes:\n output.append({\"lake\": lake, \"parameters\": os.listdir(os.path.join(folder, lake))})\n return output\n\n\ndef verify_hydrodata_total_lake_inflow(lake, parameter, start_date, end_date):\n return True\n\n\ndef get_hydrodata_total_lake_inflow(filesystem, lake, parameter, start_date, end_date):\n lake_dir = os.path.join(filesystem, \"media/bafu/hydrodata/TotalInflowLakes\", str(lake))\n if not os.path.exists(lake_dir):\n raise HTTPException(status_code=400,\n detail=\"No data available for lake: {}\".format(lake))\n if not os.path.exists(os.path.join(lake_dir, parameter)):\n raise HTTPException(status_code=400,\n detail='Parameter \"{}\" not available for {}, please select from: {}'.format(\n parameter, lake, \", \".join(os.listdir(lake_dir))))\n folder = os.path.join(lake_dir, 
parameter)\n    start_date = datetime.strptime(start_date, '%Y%m%d')\n    end_date = datetime.strptime(end_date, '%Y%m%d')\n    files = [os.path.join(folder, \"{}_{}_{}.csv\".format(lake, parameter,\n                                                         (start_date + timedelta(days=x)).strftime(\"%Y-%m-%d\")))\n             for x in range((end_date - start_date).days + 1)]\n    bad_files = []\n    for file in files:\n        if not os.path.isfile(file):\n            bad_files.append(file.split(\"/\")[-1].split(\".\")[0][-10:])\n    if len(bad_files) > 0:\n        raise HTTPException(status_code=400,\n                            detail=\"Data not available for {} ({}) for the following dates: {}\".format(\n                                lake, parameter, \", \".join(bad_files)))\n\n    df = pd.concat(map(pd.read_csv, files), ignore_index=True)\n    return df.to_json()\n","repo_name":"eawag-surface-waters-research/alplakes-fastapi","sub_path":"app/bafu.py","file_name":"bafu.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"484131213","text":"from typing import Dict, Any\n\nimport pytest\nfrom appium import webdriver\nfrom appium.options.common import AppiumOptions\nfrom appium.webdriver.appium_service import AppiumService\n\n\n@pytest.fixture(scope=\"function\")\ndef appium_driver():\n    cap: Dict[str, Any] = {\n        'platformName': 'Android',\n        'automationName': \"uiautomator2\",\n        'deviceName': 'Android',\n        'appPackage': 'com.hmh.api',\n        'appActivity': '.ApiDemos',\n        'language': 'en',\n        'locale': 'US'\n    }\n\n    url = 'http://localhost:4724'\n    global driver\n    global appium_servie\n    appium_servie = AppiumService()\n    appium_servie.start()\n    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', options=AppiumOptions().load_capabilities(cap))\n    yield driver\n    driver.quit()\n    appium_servie.stop()\n\n\n@pytest.mark.usefixtures(\"appium_driver\")\ndef test_demo(appium_driver):\n    print(\"started service\")","repo_name":"lokesh771988/Appium_python","sub_path":"test_startServices.py","file_name":"test_startServices.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34968007863","text":"import json\nimport os\nimport pytest\nimport time\nfrom PySPEC import PySPEC\n\n@pytest.mark.skipif(pytest.cfg_bitstream is None,\n                    reason=\"We need a bitstream to reflash\")\nclass TestFlatSignal(object):\n    \"\"\"\n    Collection of regression tests involving carrier reflashing\n    \"\"\"\n\n    @pytest.mark.skipif(pytest.is_spec is False,\n                        reason=\"We need a bitstream to reflash\")\n    @pytest.mark.repeat(100)\n    @pytest.mark.parametrize(\"size\", [100000])\n    def test_fpga_reconfiguration_spec(self, fmc_adc_100m, size):\n        \"\"\"\n        The SPEC FPGA could be misconfigured, leading to not acquiring data\n        from one of the channels. The problem shows itself with a channel\n        delivering only zeros.\n        \"\"\"\n        spec = fmc_adc_100m.carrier\n        spec.program_fpga(pytest.cfg_bitstream)\n        pattern = 0x555\n        fmc_adc_100m.pattern_data = pattern\n        for chan in range(4):\n            path = os.path.join(fmc_adc_100m.sys_dev_path,\n                                \"cset0/chan{:d}/current-value\".format(chan))\n            sum = 0\n            with open(path) as file:\n                for i in range(size):\n                    file.seek(0)\n                    value = int(file.read())\n                    assert (value >> 2) == pattern\n                    sum += value\n                    # Should we sleep? 
It is not the end of the\n # world if we read twice the same value: the real issue\n # is that everything is zero\n assert sum != 0, \"Missing data on channel {:d}\".format(chan)\n\n tool = \"/acc/local/L867/drv/adc-lib/4.0.3/bin/adc-acq\"\n cmd = \"sudo {} -D fmc-adc-100m14b4cha@0x{:x} -a 0,1000,1 --stat -s 0 --trg-sw 1\".format(tool,\n fmc_adc_100m.dev_id)\n ret = os.popen(cmd)\n data = json.loads(ret.read().strip())\n for chan in data[\"statistics\"]:\n assert chan[\"average\"] != 0, \"Flat signal on channel {}\".format(chan[\"chan\"])\n time.sleep(1)\n\n\n @pytest.mark.skipif(pytest.is_fec is True,\n reason=\"We must be NOT on a FEC\")\n @pytest.mark.parametrize(\"fec\", [pytest.fec])\n @pytest.mark.parametrize(\"dev_id\", [pytest.dev_id])\n @pytest.mark.repeat(100)\n def test_reboot(self, fec, dev_id):\n os.system(\"ssh -T {} 'sudo reboot'\".format(fec))\n time.sleep(90)\n\n tool = \"/acc/local/L867/drv/adc-lib/4.0.3/bin/adc-acq\"\n cmd = \"sudo {} -D fmc-adc-100m14b4cha@0x{:x} -a 0,1000,1 --stat -s 0 --trg-sw 1\".format(tool, dev_id)\n ret = os.popen(\"ssh -T {} '{}'\".format(fec, cmd))\n data = json.loads(ret.read().strip())\n for chan in data[\"statistics\"]:\n assert chan[\"average\"] != 0\n","repo_name":"vascoguita/fmc-adc-100m14b4cha","sub_path":"pytest/regressions/test_reprogramming.py","file_name":"test_reprogramming.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17103050828","text":"from typing import Any, Generator\n\nimport pytest\nfrom flask import Flask\nfrom flask.testing import FlaskClient\nfrom sqlalchemy import Inspector, inspect, text\n\nfrom budget_book_backend import create_app\nfrom budget_book_backend.models.db_setup import DbSetup\nfrom tests.setup_db import setup_db\n\n\n@pytest.fixture\ndef app() -> Flask:\n \"\"\"Initlaize the flask app for testing purposes.\"\"\"\n test_app: Flask = create_app(\n test_config=dict(DATABASE=\"sqlite:///tests/test.db\")\n )\n\n return test_app\n\n\n@pytest.fixture\ndef client(app: Flask) -> FlaskClient:\n \"\"\"Expose the client of the app being used to mock requests.\"\"\"\n return app.test_client()\n\n\n@pytest.fixture(scope=\"function\")\ndef use_test_db(app: Flask) -> Generator[None, Any, None]:\n \"\"\"Set up, expose, and take down the database used for the tests.\"\"\"\n\n setup_db()\n\n with app.app_context():\n DbSetup.set_engine()\n\n yield\n\n # Tear down the test databse\n inspector: Inspector = inspect(DbSetup.engine)\n with DbSetup.engine.connect() as conn:\n for table in inspector.get_table_names():\n conn.execute(text(f\"DROP TABLE IF EXISTS {table};\"))\n","repo_name":"LukasErekson/budget-books-backend","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41161888814","text":"import torch\r\nimport itertools\r\nfrom util.image_pool import ImagePool\r\nfrom .base_model import BaseModel\r\nfrom . 
import networks_scit_seg as networks\r\nfrom torchsummary import summary\r\n\r\n\r\nclass ScitSegModel(BaseModel):\r\n @staticmethod\r\n def modify_commandline_options(parser, is_train=True):\r\n parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout\r\n if is_train:\r\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\r\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\r\n parser.add_argument('--lambda_identity', type=float, default=0.5, help='')\r\n return parser\r\n\r\n def __init__(self, opt):\r\n BaseModel.__init__(self, opt)\r\n # specify the training losses you want to print out. The training/test scripts will call \r\n self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'style_A', 'style_B']\r\n # specify the images you want to save/display. The training/test scripts will call \r\n visual_names_A = ['real_A', 'seg_A', 'fake_B', 'rec_A']\r\n visual_names_B = ['real_B', 'seg_B', 'fake_A', 'rec_B']\r\n if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\r\n visual_names_A.append('idt_B')\r\n visual_names_B.append('idt_A')\r\n self.opt.display_ncols += 1\r\n\r\n self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B\r\n # specify the models you want to save to the disk. The training/test scripts will call and .\r\n if self.isTrain:\r\n self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']\r\n else: # during test time, only load Gs\r\n self.model_names = ['G_A', 'G_B']\r\n\r\n self.netG_A = networks.define_G(opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.opt.gpu_ids)\r\n self.netG_B = networks.define_G(opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.opt.gpu_ids)\r\n # summary(self.netG_A, input_size=[(3, 256, 256), (1, 256, 256)])\r\n\r\n if self.isTrain: # define discriminators\r\n self.netD_A = networks.define_D(opt.ndf, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\r\n # summary(self.netD_A, input_size=[(3, 256, 256), (1, 256, 256)])\r\n self.netD_B = networks.define_D(opt.ndf, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\r\n if self.isTrain:\r\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\r\n assert(opt.input_nc == opt.output_nc)\r\n self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\r\n self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\r\n # define loss functions\r\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.\r\n self.criterionCycle = torch.nn.L1Loss()\r\n self.criterionIdt = torch.nn.L1Loss()\r\n self.criterionStyle = networks.VGGLoss(self.opt.gpu_ids)\r\n # initialize optimizers; schedulers will be automatically created by function .\r\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\r\n self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\r\n self.optimizers.append(self.optimizer_G)\r\n self.optimizers.append(self.optimizer_D)\r\n\r\n def set_input(self, input):\r\n AtoB = self.opt.direction == 'AtoB'\r\n self.real_A = input['A' if AtoB else 
'B'].to(self.device)\r\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\r\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\r\n self.seg_A = input['seg_A'].to(self.device)\r\n self.seg_B = input['seg_B'].to(self.device)\r\n\r\n def forward(self):\r\n \"\"\"Run forward pass; called by both functions and .\"\"\"\r\n self.fake_B = self.netG_A(self.real_A, self.seg_A) # G_A(A)\r\n self.rec_A = self.netG_B(self.fake_B, self.seg_B) # G_B(G_A(A))\r\n self.fake_A = self.netG_B(self.real_B, self.seg_B) # G_B(B)\r\n self.rec_B = self.netG_A(self.fake_A, self.seg_B) # G_A(G_B(B))\r\n\r\n def backward_D_basic(self, netD, real, fake, seg_real, seg_fake):\r\n # Real\r\n pred_real = netD(real, seg_real)\r\n loss_D_real = self.criterionGAN(pred_real, True, seg_real)\r\n # Fake\r\n pred_fake = netD(fake.detach(), seg_fake)\r\n loss_D_fake = self.criterionGAN(pred_fake, False, seg_fake)\r\n # Combined loss and calculate gradients\r\n loss_D = (loss_D_real + loss_D_fake) * 0.5\r\n loss_D.backward()\r\n return loss_D\r\n\r\n def backward_D_A(self):\r\n \"\"\"Calculate GAN loss for discriminator D_A\"\"\"\r\n fake_B = self.fake_B_pool.query(self.fake_B)\r\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B, self.seg_B, self.seg_A)\r\n\r\n def backward_D_B(self):\r\n \"\"\"Calculate GAN loss for discriminator D_B\"\"\"\r\n fake_A = self.fake_A_pool.query(self.fake_A)\r\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A, self.seg_A, self.seg_B)\r\n\r\n def backward_G(self):\r\n \"\"\"Calculate the loss for generators G_A and G_B\"\"\"\r\n lambda_idt = self.opt.lambda_identity\r\n lambda_A = self.opt.lambda_A\r\n lambda_B = self.opt.lambda_B\r\n # Identity loss\r\n if lambda_idt > 0:\r\n # G_A should be identity if real_B is fed: ||G_A(B) - B||\r\n self.idt_A = self.netG_A(self.real_B, self.seg_B)\r\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\r\n # G_B should be identity if real_A is fed: ||G_B(A) - A||\r\n self.idt_B = self.netG_B(self.real_A, self.seg_A)\r\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\r\n else:\r\n self.loss_idt_A = 0\r\n self.loss_idt_B = 0\r\n\r\n # GAN loss D_A(G_A(A))\r\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B, self.seg_A), True, self.seg_A)\r\n # GAN loss D_B(G_B(B))\r\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A, self.seg_B), True, self.seg_B)\r\n # style loss\r\n self.loss_style_A = self.criterionStyle(self.fake_B * self.seg_A, self.real_A * self.seg_A) * self.opt.lambda_style\r\n self.loss_style_B = self.criterionStyle(self.fake_A * self.seg_B, self.real_B * self.seg_B) * self.opt.lambda_style\r\n # self.loss_style_A = 0\r\n # self.loss_style_B = 0\r\n # Forward cycle loss || G_B(G_A(A)) - A||\r\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\r\n # Backward cycle loss || G_A(G_B(B)) - B||\r\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\r\n\r\n # combined loss and calculate gradients\r\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + \\\r\n self.loss_idt_A + self.loss_idt_B + self.loss_style_A + self.loss_style_B\r\n self.loss_G.backward()\r\n\r\n def optimize_parameters(self):\r\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\r\n # forward\r\n self.forward() # compute fake images and reconstruction images.\r\n # G_A and G_B\r\n 
self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs\r\n        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero\r\n        self.backward_G()             # calculate gradients for G_A and G_B\r\n        self.optimizer_G.step()       # update G_A and G_B's weights\r\n        # D_A and D_B\r\n        self.set_requires_grad([self.netD_A, self.netD_B], True)\r\n        self.optimizer_D.zero_grad()   # set D_A and D_B's gradients to zero\r\n        self.backward_D_A()      # calculate gradients for D_A\r\n        self.backward_D_B()      # calculate gradients for D_B\r\n        self.optimizer_D.step()  # update D_A and D_B's weights\r\n\r\n    def fuse_real_fake(self, realA, fakeB, segA):\r\n        B, C, H, W = realA.size()\r\n        fuse_img = torch.zeros_like(realA)\r\n        if torch.min(segA) < 0:  # <-1, 1> -> <0, 1>\r\n            segA = (segA + 1) / 2\r\n        for batch in range(B):\r\n            real = realA[batch]\r\n            fake = fakeB[batch]\r\n            seg = segA[batch]\r\n            fuse_img[batch] = real * (1 - seg) + fake * seg\r\n        return fuse_img\r\n\r\n    def test(self):\r\n        with torch.no_grad():\r\n            self.forward()\r\n            self.fake_B = self.fuse_real_fake(self.real_A, self.fake_B, self.seg_A)\r\n            self.fake_A = self.fuse_real_fake(self.real_B, self.fake_A, self.seg_B)\r\n            self.compute_visuals()\r\n","repo_name":"xml94/SCIT","sub_path":"models/scit_seg_model.py","file_name":"scit_seg_model.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"31971678734","text":"#Design python application which creates two threads as evenfactor and oddfactor.\n#Both the thread accept one parameter as integer.\n#Evenfactor thread will display addition of even factors of given number and oddfactor will display addition of odd factors of given number. \n#After execution of both the thread gets completed main thread should display message as \"exit from main\".\n\n# Author: Apurva Anil Jogal\n# Date : 5th April 2019\n\nfrom threading import *;\n\ndef funEvenFactor(number):\n\t\n\tfor i in range(1,number):\n\t\n\t\tif( number%i ==0 and i%2==0):\t\n\t\t\tprint(\"funEvenFactor\",i)\n\t\ndef funOddFactor(number):\n\tfor i in range(1,number):\n\t\tif(number%i ==0 and i%2 != 0):\n\t\t\tprint(\"funOddFactor\",i)\n\t\t\t\n\ndef main():\n\n\t# convert the console input to int so range(1, number) works on Python 3\n\tno = int(input(\"Enter a number\"));\n\t\t\n\tevenfactor = Thread(target= funEvenFactor,args = (no,));\t\t\n\toddfactor = Thread(target= funOddFactor, args = (no,));\n\t\n\t\n\t# Will execute both in parallel\n\tevenfactor.start()\n\toddfactor.start()\n\t\n\t# Joins threads back to the parent process, which is this program.\n\tevenfactor.join()\n\toddfactor.join()\n\t\n\tprint(\"Exit from main\");\n\t\nif __name__ == \"__main__\":\n\tmain();\n\n\n","repo_name":"ApurvaJogal/python_Assignments","sub_path":"Assignment8/Assignment8_2.py","file_name":"Assignment8_2.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"143187244","text":"import sys\nimport boto3\n\nfrom pyspark.sql.session import SparkSession\nfrom awsglue.utils import getResolvedOptions\nfrom awsglue.context import GlueContext, DynamicFrame\nfrom awsglue.job import Job\nimport pyspark.sql.functions as func\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, DecimalType, DateType, TimestampType, FloatType\n\nimport glueLibraryV2 as gl2\n\nargs = getResolvedOptions(sys.argv, ['JOB_NAME','BUCKET_ORIG','BUCKET_DEST','BUCKET_CONF','DB_NAME', 
'ROUTE','FORMAT','PREFIX_TABLE_DEST','SUFIX_TABLE_DEST'])\n\nspark = SparkSession.builder.config('spark.serializer','org.apache.spark.serializer.KryoSerializer')\\\n    .config('spark.sql.hive.convertMetastoreParquet', 'false')\\\n    .config(\"spark.sql.parquet.datetimeRebaseModeInRead\", \"CORRECTED\")\\\n    .config(\"spark.sql.avro.datetimeRebaseModeInWrite\", \"CORRECTED\")\\\n    .getOrCreate()\n\nglueContext = GlueContext(spark.sparkContext)\njob = Job(glueContext)\njob.init(args['JOB_NAME'], args)\nlogger = glueContext.get_logger()\n\nroute = args['ROUTE']\nhudiStorageType = 'CoW'\ndropColumnList = ['db','table_name','Op']\ndbName = args['DB_NAME']\nformat = args['FORMAT']\nsourceBucketName = args['BUCKET_ORIG']\nconfigBucketName = args['BUCKET_CONF']\ntargetBucketName = args['BUCKET_DEST']\nprefixTable = args['PREFIX_TABLE_DEST']\nsuffixTable = args['SUFIX_TABLE_DEST']\n\n#Tabla ciudad\n\nciudad0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_ciudad_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"ciudad\",\n)\nciudad = ciudad0.toDF()\nciudad = ciudad.select(func.col('id').alias('id_ciudad'),func.col('valor').alias('ds_city_name'),'codigo_dane',func.length('codigo_dane').alias('length'))\n\n# city DANE codes have 5 digits; pad 4-digit codes with a leading zero\nciudad = ciudad.withColumn(\"cd_city_dane_code\", \n                   func.expr(\"CASE WHEN length = 4 THEN concat('0', codigo_dane) \" + \n                             \"WHEN length = 5 THEN codigo_dane \" +\n                             \"ELSE codigo_dane END\"))\n\nciudad = ciudad.select('id_ciudad', 'cd_city_dane_code', 'ds_city_name',func.substring(ciudad.cd_city_dane_code, 1, 2).alias('cd_state_dane_code'))\n\n#Tabla departamento\n\ndepartamento0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_departamento_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"departamento\",\n)\ndepartamento = departamento0.toDF()\ndepartamento = departamento.select(func.col('id').alias('id_departamento'), func.col('valor').alias('ds_state_name'))\n\n#Tabla localidad\n\nlocalidad0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_localidad_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"localidad\",\n)\nlocalidad = localidad0.toDF()\nlocalidad = localidad.select(func.col('id').alias('id_localidad'),func.initcap('valor').alias('ds_district_division'))\n\n#Tabla barrio\n\nbarrio0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_barrio_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"barrio\",\n)\nbarrio = barrio0.toDF()\nbarrio = barrio.select(func.col('id').alias('id_barrio'),func.initcap('valor').alias('ds_neighborhood'))\n\n#Tabla s_estrato\n\nestrato0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_s_estrato_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"s_estrato\",\n)\nestrato = estrato0.toDF()\nestrato = estrato.select(func.col('id').alias('id_estrato'),func.col('valor').alias('cd_neighborhood_economic_level'))\n\n#Tabla usuario\n\nusuario0 = gl2.read_data_2(\n    spark,\n    glueContext= glueContext,\n    s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_usuario_glue_tb/\",\n    data_type= \"hudi\",\n    table_name= \"usuario\",\n)\n\nusuario = 
usuario.select(func.col('id').alias('id_publication_owner_app_identification_number'),func.lower('correo_electronico').alias('ds_publication_owner_email'),'activo','nombre')\n\n#Tabla tipo inmueble\n\ntipo_inmueble0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_s_tipo_inmueble_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"s_tipo_inmueble\",\n)\n\ntipo_inmueble = tipo_inmueble0.toDF()\ntipo_inmueble = tipo_inmueble.select(func.col('id').alias('id_tipo_inmueble'),func.col('valor').alias('ds_property_type'))\n\n#Tabla s_estado_proyecto\n\ns_estado_proyecto0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_s_estado_proyecto_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"s_estado_proyecto\",\n)\n\ns_estado_proyecto = s_estado_proyecto0.toDF()\ns_estado_proyecto = s_estado_proyecto.select(func.col('id').alias('id_estado_proyecto'),func.col('valor').alias('ds_property_project_status'))\n\n#Tabla detalle_caracteristicas_inmueble\n\ndetalle_caracteristicas_inmueble0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_detalle_caracteristicas_inmueble_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"detalle_caracteristicas_inmueble\",\n)\n\ndetalle_caracteristicas_inmueble = detalle_caracteristicas_inmueble0.toDF()\ndetalle_caracteristicas_inmueble = detalle_caracteristicas_inmueble.select('id_inmueble','id_caracteristica','valor')\n\n#Tabla definicion_caracteristicas\n\ndefinicion_caracteristicas0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_definicion_caracteristicas_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"definicion_caracteristicas\",\n)\n\ndefinicion_caracteristicas = definicion_caracteristicas0.toDF()\ndefinicion_caracteristicas = definicion_caracteristicas.select('id_caracteristica','alias')\n\n# Joins\n\ndefinicion_caracteristicas = detalle_caracteristicas_inmueble.join(definicion_caracteristicas, detalle_caracteristicas_inmueble.id_caracteristica == definicion_caracteristicas.id_caracteristica, 'inner')\n\ncaracteristicas_nuevos = definicion_caracteristicas.groupBy('id_inmueble').pivot('alias').agg(func.first('valor').alias('valor')) \n\n#Tabla inmuebles_tipo\n\ninmuebles_tipo0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_inmuebles_tipo_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"inmuebles_tipo\",\n)\n\ninmuebles_tipo = inmuebles_tipo0.toDF()\ninmuebles_tipo = inmuebles_tipo.select('id_inmueble_tipo','id_proyecto','estado',func.col('codigo_tipo_propiedad').alias('id_property_project_building_class'),\n func.col('nombre').alias('ds_property_project_building_class_name'))\ninmuebles_tipo = inmuebles_tipo.withColumn('id_tipologia', func.split(inmuebles_tipo['id_property_project_building_class'], '-').getItem(1))\n\n\ninmuebles_tipo = inmuebles_tipo.withColumn(\"ds_property_project_building_class_status\", \n func.expr(\"CASE WHEN estado = 'A' THEN 'Activo' \" + \n \"WHEN estado = 'I' THEN 'Inactivo' \" +\n \"ELSE 'Eliminado' END\")).drop('estado')\n\ninmuebles_tipo = inmuebles_tipo.join(caracteristicas_nuevos, inmuebles_tipo.id_inmueble_tipo == caracteristicas_nuevos.id_inmueble, \"inner\")\n\n#Tabla proyecto\n\nproyecto0 = gl2.read_data_2(\n spark,\n 
glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_proyectos_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"proyectos\",\n)\n\nproyecto = proyecto0.toDF()\nproyecto = proyecto.select(func.col('id_proyecto').alias('id'),\n func.col('codigo_proyecto').alias('id_property_project'),\n 'id_ciudad','id_localidad','id_barrio',\n 'id_departamento','id_tipo_inmueble',\n func.col('direccion').alias('ds_address'),\n func.col('estrato').alias('id_estrato'),\n func.col('latitud').cast('String').alias('ds_latitude'),\n func.col('longitud').cast('String').alias('ds_longitude'),'estado',\n func.initcap('nombre_proyecto').alias('ds_property_project_name'),\n 'id_usuario',\n func.col('fecha_creacion').alias('dt_creation_date'),\n func.col('fecha_modificacion').alias('dt_modification_date'),\n func.col('fecha_entrega').alias('dt_property_project_delivery_date'),'id_estado_proyecto')\n\nproyecto = proyecto.withColumn(\"ds_publication_status\", \n func.expr(\"CASE WHEN estado = 'A' THEN 'Activo' \" + \n \"WHEN estado = 'I' THEN 'Inactivo' \" +\n \"ELSE 'Eliminado' END\")).drop('estado')\n\nproyecto = proyecto.join(s_estado_proyecto, proyecto.id_estado_proyecto == s_estado_proyecto.id_estado_proyecto, 'inner')\n\nproyecto = proyecto.join(inmuebles_tipo, proyecto.id == inmuebles_tipo.id_proyecto, \"inner\")\n\nproyecto = proyecto.select('*',func.concat_ws('-',proyecto.id_property_project,proyecto.id_tipologia).alias('id_publication')) \nproyecto = proyecto.join(usuario, proyecto.id_usuario == usuario.id_publication_owner_app_identification_number, \"inner\")\nproyecto = proyecto.join(estrato, proyecto.id_estrato == estrato.id_estrato, \"left\").drop('id_estrato')\nproyecto = proyecto.join(ciudad, proyecto.id_ciudad == ciudad.id_ciudad, \"left\").drop('id_ciudad')\nproyecto = proyecto.join(departamento, proyecto.id_departamento == departamento.id_departamento, \"left\").drop('id_departamento')\nproyecto = proyecto.join(localidad, proyecto.id_localidad == localidad.id_localidad, \"left\").drop('id_localidad')\nproyecto = proyecto.join(barrio, proyecto.id_barrio == barrio.id_barrio, \"left\").drop('id_barrio')\nproyecto = proyecto.join(tipo_inmueble, proyecto.id_tipo_inmueble == tipo_inmueble.id_tipo_inmueble, \"left\").drop('id_tipo_inmueble')\n\nproyecto = proyecto.withColumn('ds_publication_type', func.lit('Proyecto Inmobiliario'))\\\n .withColumn('ds_publication_site_store',func.lit('Nuevo'))\\\n .withColumn('nm_property_age',func.lit(None))\\\n .withColumn('nm_property_monthly_rent_payment',func.lit(None))\\\n .withColumn('ds_property_real_state_registration_number',func.lit(None))\\\n .withColumn('fl_property_offer',func.lit('No'))\\\n .withColumn('fl_has_gym',func.lit('No'))\\\n .withColumn('nm_elevator_number',func.lit(None))\\\n .withColumn('nm_visitors_parking_number',func.lit(None))\\\n .withColumn('fl_has_reception',func.lit('No'))\\\n .withColumn('fl_has_social_room',func.lit('No'))\\\n .withColumn('fl_has_communal_living',func.lit('No'))\\\n .withColumn('fl_has_children_zone',func.lit('No'))\\\n .withColumn('fl_has_green_zones',func.lit('No'))\\\n .withColumn('fl_has_vigilance', func.lit('No'))\\\n .withColumn('dt_property_inception_date', func.lit(None))\\\n .withColumn('dt_property_expiration_date', func.lit(None))\\\n .withColumn('dt_property_deletion_date', func.lit(None)) \n\nproyecto = proyecto.withColumnRenamed(\"sellingPrice\",\"nm_property_selling_price\")\\\n 
.withColumnRenamed(\"administrationValue\",\"nm_property_monthly_administration_fee\")\\\n .withColumnRenamed(\"builtArea\",\"nm_built_area\")\\\n .withColumnRenamed(\"privateArea\",\"nm_private_area\")\\\n .withColumnRenamed(\"numParking\",\"nm_parking_number\")\\\n .withColumnRenamed(\"numBedRooms\",\"nm_bedroom_number\")\\\n .withColumnRenamed(\"numBathrooms\",\"nm_bathroom_number\")\\\n .withColumnRenamed(\"balconiesNumber\",\"nm_balcony_number\")\\\n .withColumnRenamed(\"terracesNumber\",\"nm_terrace_number\")\\\n .withColumnRenamed(\"depositsNumber\",\"nm_storage_room_number\")\\\n .withColumn('nm_square_meter_price',func.round(func.col('nm_property_selling_price') / func.col('nm_built_area'), 2))\n \nproyecto = proyecto.withColumn(\"url\",\n func.expr(\"CASE WHEN ds_property_type in ('Casa','Apartaestudio','Apartamento','Finca','Lote') THEN concat('https://www.ciencuadras.com/proyecto-de-vivienda/', replace(trim(lower(nombre)),' ','-'), '-', replace(trim(lower(ds_property_project_name)),' ','-'),'-', replace(trim(lower(ds_city_name)),' ','-'),'-',id ) \" +\n \"ELSE concat('https://www.ciencuadras.com/proyecto-comercial/', replace(trim(lower(nombre)),' ','-'), '-', replace(trim(lower(ds_property_project_name)),' ','-'),'-', replace(trim(lower(ds_city_name)),' ','-'),'-',id ) END\"))\n\nproyecto = proyecto.withColumn(\"tx_url\",func.expr(\"CASE WHEN url is not null THEN translate(url,'áéíóú','aeiou')\" +\n \"ELSE 'https://www.ciencuadras.com/' END\"))\n\nproyecto = proyecto.distinct()\n\nproyectos = proyecto.select('id_publication','ds_publication_status','ds_publication_type','ds_publication_site_store','id_publication_owner_app_identification_number','ds_publication_owner_email',\n 'ds_property_real_state_registration_number','ds_property_type','id_property_project','ds_property_project_name','ds_property_project_status','id_property_project_building_class',\n 'ds_property_project_building_class_name','ds_property_project_building_class_status','cd_state_dane_code','ds_state_name','cd_city_dane_code','ds_city_name','ds_neighborhood',\n 'ds_district_division','cd_neighborhood_economic_level','ds_address','ds_latitude','ds_longitude',\n 'nm_property_selling_price','nm_property_monthly_rent_payment','nm_property_monthly_administration_fee', func.col('nm_square_meter_price').cast('String').alias('nm_square_meter_price'),\n 'nm_built_area','nm_private_area','nm_parking_number','nm_visitors_parking_number','nm_bedroom_number','nm_bathroom_number','nm_property_age','nm_balcony_number',\n 'nm_terrace_number','nm_storage_room_number','nm_elevator_number','allowPets','laundryZone','fl_has_green_zones','fl_has_communal_living','fl_has_children_zone','privatePool',\n 'fl_has_gym','serviceRoom','serviceBathroom','fl_has_social_room','fl_has_reception','airConditioner','homeAppliances','dt_creation_date','dt_modification_date',\n 'dt_property_inception_date','dt_property_expiration_date','dt_property_deletion_date','dt_property_project_delivery_date','fl_property_offer','tx_url','fl_has_vigilance')\n\n#Tabla inmueble\n\ninmueble0 = gl2.read_data_2(\n spark,\n glueContext= glueContext,\n s3_url= f\"s3://{sourceBucketName}/{route}/{dbName}/ciencuadras_curated_inmueble_glue_tb/\",\n data_type= \"hudi\",\n table_name= \"inmueble\",\n)\ninmueble = inmueble0.toDF()\ninmueble = inmueble.select(func.col('id'),\n func.col('id_depto').alias('id_departamento'),\n 'id_ciudad','id_localidad','id_barrio','id_tipo_inmueble','id_tipo_transaccion','id_usuario',\n func.col('direccion').alias('ds_address'),\n 
func.col('estrato').alias('id_estrato'),\n func.col('codigo').alias('id_publication'),\n func.col('latitud').alias('ds_latitude'),\n func.col('longitud').alias('ds_longitude'),\n func.col('precio_venta').cast('String').alias('nm_property_selling_price'),\n func.col('canon_arrendamiento').cast('String').alias('nm_property_monthly_rent_payment'),\n func.col('valor_administracion').cast('String').alias('nm_property_monthly_administration_fee'),\n func.abs('num_parqueaderos').cast('Integer').alias('nm_parking_number'),\n func.abs('num_habitaciones').cast('Integer').alias('nm_bedroom_number'),\n func.abs('num_banos').cast('Integer').alias('nm_bathroom_number'),\n func.abs('area_bodega').alias('area_bodega'),\n func.abs('area_oficina').alias('area_oficina'),\n func.abs('area_lote').alias('area_lote'), \n func.abs('area_construida').alias('area_construida'),\n func.abs('area_privada').alias('nm_private_area'),\n func.abs('antiguedad').cast('Integer').alias('nm_property_age'),\n func.col('cuarto_servicio').cast('String').alias('serviceRoom'),\n func.col('bano_servicio').cast('String').alias('serviceBathroom'),\n func.col('zona_lavanderia').cast('String').alias('laundryZone'),\n func.col('aire_acondicionado').cast('String').alias('airConditioner'),\n func.col('electrodomesticos').alias('homeAppliances'),\n func.abs('num_balcones').cast('Integer').alias('nm_balcony_number'),\n func.abs('num_terraza').cast('Integer').alias('nm_terrace_number'),\n func.abs('num_depositos').cast('Integer').alias('nm_storage_room_number'),\n func.abs('num_ascensores').cast('Integer').alias('nm_elevator_number'),\n func.abs('num_parqueaderos_visitantes').cast('Integer').alias('nm_visitors_parking_number'),\n func.col('recepcion').alias('fl_has_reception'),\n func.col('sede_social').alias('fl_has_social_room'),\n func.col('salon_comunal').alias('fl_has_communal_living'),\n func.col('zona_infantil').alias('fl_has_children_zone'),\n func.col('zonas_verdes').alias('fl_has_green_zones'),\n func.col('piscina_comunal').alias('privatePool'),\n func.col('gimnasio').alias('fl_has_gym'),\n func.col('fecha_creacion').alias('dt_creation_date'),\n func.col('fecha_modificacion').alias('dt_modification_date'),\n func.col('permite_mascotas').cast('String').alias('allowPets'),\n func.col('matricula_inmobiliaria').alias('ds_property_real_state_registration_number'),\n func.col('enoferta').alias('fl_property_offer'),\n func.col('start_publicacion').alias('dt_property_inception_date'),\n func.col('end_publicacion').alias('dt_property_expiration_date'),\n func.col('fecha_eliminacion').alias('dt_property_deletion_date'),\n 'nombre_proyecto',\n 'proyecto',\n 'vigilancia',\n 'activo')\n\ninmueble = inmueble.join(estrato, inmueble.id_estrato == estrato.id_estrato, \"left\").drop('id_estrato')\ninmueble = inmueble.join(ciudad, inmueble.id_ciudad == ciudad.id_ciudad, \"left\").drop('id_ciudad')\ninmueble = inmueble.join(departamento, inmueble.id_departamento == departamento.id_departamento, \"left\").drop('id_departamento')\ninmueble = inmueble.join(localidad, inmueble.id_localidad == localidad.id_localidad, \"left\").drop('id_localidad')\ninmueble = inmueble.join(barrio, inmueble.id_barrio == barrio.id_barrio, \"left\").drop('id_barrio')\ninmueble = inmueble.join(tipo_inmueble, inmueble.id_tipo_inmueble == tipo_inmueble.id_tipo_inmueble, \"left\").drop('id_tipo_inmueble')\n\ninmueble = 
inmueble.fillna(value=0,subset=['area_bodega']).fillna(value=0,subset=['area_oficina']).fillna(value=0,subset=['area_lote']).fillna(value=0,subset=['area_construida']).fillna(value=0,subset=['nm_private_area'])\n\ninmueble = inmueble.withColumn('nm_built_area', func.expr(\"CASE WHEN area_construida > 0 THEN area_construida \" + \n \"WHEN area_construida <= 0 and area_bodega > 0 THEN area_bodega \" +\n \"WHEN area_construida <= 0 and area_lote > 0 THEN area_lote \" +\n \"WHEN area_construida <= 0 and area_oficina > 0 THEN area_oficina \" + \n \"ELSE area_construida END\"))\n\ninmueble = inmueble.withColumn(\"ds_publication_status\", func.expr(\"CASE WHEN activo = '0' THEN 'Activo' \" + \n \"WHEN activo = '1' THEN 'Inactivo' \" +\n \"WHEN activo = '2' THEN 'Eliminado' \" +\n \"WHEN activo = '4' THEN 'Repetido' \" + \n \"ELSE 'Otro' END\")).drop('activo')\n\ninmueble = inmueble.withColumn(\"ds_publication_site_store\", func.expr(\"CASE WHEN id_tipo_transaccion = '1' THEN 'Venta' \" + \n \"WHEN id_tipo_transaccion = '2' THEN 'Arriendo' \" +\n \"WHEN id_tipo_transaccion = '3' THEN 'Arriendo o venta' \" + \n \"WHEN id_tipo_transaccion = '4' THEN 'Agenda' \" + \n \"ELSE 'Otro' END\")).drop('id_tipo_transaccion')\n \ninmueble = inmueble.withColumn(\"fl_has_vigilance\", func.expr(\"CASE WHEN vigilancia in ('1','2') THEN 'Si' \" + \n \"ELSE 'No' END\")) \n\ninmueble = inmueble.withColumn('dt_property_project_delivery_date', func.lit(None))\\\n .withColumn('ds_publication_type', func.lit('Inmueble'))\\\n .withColumn('ds_property_project_building_class_name',func.lit(None))\\\n .withColumn('ds_property_project_building_class_status',func.lit(None))\\\n .withColumn('ds_property_project_name',func.lit(None))\\\n .withColumn('ds_property_project_status',func.lit(None))\\\n .withColumn('id_property_project',func.lit(None))\\\n .withColumn('id_property_project_building_class',func.lit(None))\\\n .withColumn('nm_square_meter_price',func.round(func.col('nm_property_selling_price') / func.col('nm_built_area'), 2))\n\ninmueble = inmueble.join(usuario, inmueble.id_usuario == usuario.id_publication_owner_app_identification_number, \"inner\")\ninmueble = inmueble.distinct()\n\ninmueble = inmueble.withColumn(\"url\",\n func.expr(\"CASE WHEN proyecto = 0 THEN concat('https://www.ciencuadras.com/inmueble/',lower(ds_property_type),'-en-',replace(trim(lower(ds_publication_site_store)),' ','-'),'-en-',replace(lower(ds_neighborhood),' ','-'),'-',replace(trim(lower(ds_city_name)),' ','-'),'-',id ) \" +\n \"ELSE concat('https://www.ciencuadras.com/proyecto/proyecto-',replace(trim(lower(nombre_proyecto)),' ','-'),'-en-',replace(trim(lower(ds_neighborhood)),' ','-'),'-',replace(trim(lower(ds_city_name)),' ','-'),'-',id ) END\"))\n \ninmueble = inmueble.withColumn(\"tx_url\",func.expr(\"CASE WHEN url is not null THEN translate(url,'áéíóú','aeiou')\" +\n \"ELSE 'https://www.ciencuadras.com/' END\")) \n\ninmuebles = inmueble.select('id_publication','ds_publication_status','ds_publication_type','ds_publication_site_store',\n 'id_publication_owner_app_identification_number','ds_publication_owner_email',\n 'ds_property_real_state_registration_number','ds_property_type','id_property_project',\n 'ds_property_project_name','ds_property_project_status','id_property_project_building_class',\n 'ds_property_project_building_class_name','ds_property_project_building_class_status',\n 'cd_state_dane_code','ds_state_name','cd_city_dane_code','ds_city_name','ds_neighborhood',\n 
'ds_district_division','cd_neighborhood_economic_level','ds_address','ds_latitude','ds_longitude',\n 'nm_property_selling_price','nm_property_monthly_rent_payment','nm_property_monthly_administration_fee', \n func.col('nm_square_meter_price').cast('String').alias('nm_square_meter_price'),\n func.col('nm_built_area').cast('String').alias('nm_built_area'),\n func.col('nm_private_area').cast('String').alias('nm_private_area'),\n func.col('nm_parking_number').cast('String').alias('nm_parking_number'),\n func.col('nm_visitors_parking_number').cast('String').alias('nm_visitors_parking_number'),\n func.col('nm_bedroom_number').cast('String').alias('nm_bedroom_number'),\n func.col('nm_bathroom_number').cast('String').alias('nm_bathroom_number'),\n func.col('nm_property_age').cast('String').alias('nm_property_age'),\n func.col('nm_balcony_number').cast('String').alias('nm_balcony_number'),\n func.col('nm_terrace_number').cast('String').alias('nm_terrace_number'),\n func.col('nm_storage_room_number').cast('String').alias('nm_storage_room_number'), \n func.col('nm_elevator_number').cast('String').alias('nm_elevator_number'),\n 'allowPets','laundryZone','fl_has_green_zones','fl_has_communal_living','fl_has_children_zone',\n 'privatePool','fl_has_gym','serviceRoom','serviceBathroom','fl_has_social_room','fl_has_reception',\n 'airConditioner','homeAppliances','dt_creation_date','dt_modification_date',\n 'dt_property_inception_date','dt_property_expiration_date','dt_property_deletion_date',\n 'dt_property_project_delivery_date','fl_property_offer','tx_url','fl_has_vigilance')\n\n\ntabla = inmuebles.union(proyectos)\ntabla = tabla.na.drop(subset=[\"id_publication\"])\ntabla = tabla.distinct()\n\ntabla = tabla.withColumn(\"fl_is_pet_allowed\", func.expr(\"CASE WHEN allowPets = '0' THEN 'No' \" + \n \"WHEN allowPets = '1' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('allowPets')\n \ntabla = tabla.withColumn(\"fl_has_laundry_area\", func.expr(\"CASE WHEN laundryZone = '0' or laundryZone = 'false' THEN 'No' \" + \n \"WHEN laundryZone = '1' or laundryZone = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('laundryZone') \n\ntabla = tabla.withColumn(\"fl_has_green_zones\", func.expr(\"CASE WHEN fl_has_green_zones = '0' or fl_has_green_zones = 'false' THEN 'No' \" + \n \"WHEN fl_has_green_zones = '1' or fl_has_green_zones = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\"))\n\ntabla = tabla.withColumn(\"fl_has_communal_living\", func.expr(\"CASE WHEN fl_has_communal_living = '0' or fl_has_communal_living = 'false' THEN 'No' \" + \n \"WHEN fl_has_communal_living = '1' or fl_has_communal_living = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")) \n\ntabla = tabla.withColumn(\"fl_has_children_zone\", func.expr(\"CASE WHEN fl_has_children_zone = '0' or fl_has_children_zone = 'false' THEN 'No' \" + \n \"WHEN fl_has_children_zone = '1' or fl_has_children_zone = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\"))\n\ntabla = tabla.withColumn(\"fl_has_pool\", func.expr(\"CASE WHEN privatePool = '0' or privatePool = 'false' THEN 'No' \" + \n \"WHEN privatePool = '1' or privatePool = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('privatePool') \n\ntabla = tabla.withColumn(\"fl_has_gym\", func.expr(\"CASE WHEN fl_has_gym = '0' or fl_has_gym = 'false' THEN 'No' \" + \n \"WHEN fl_has_gym = '1' or fl_has_gym = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\"))\n\ntabla = tabla.withColumn(\"fl_has_service_room\", func.expr(\"CASE WHEN serviceRoom = '0' or serviceRoom = 'false' THEN 'No' \" + \n \"WHEN serviceRoom = '1' or serviceRoom = 'true' THEN 'Si' \" 
+\n \"ELSE 'No' END\")).drop('serviceRoom')\n\ntabla = tabla.withColumn(\"fl_has_service_bathroom\", func.expr(\"CASE WHEN serviceBathroom = '0' or serviceBathroom = 'false' THEN 'No' \" + \n \"WHEN serviceBathroom = '1' or serviceBathroom = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('serviceBathroom')\n\ntabla = tabla.withColumn(\"fl_has_reception\", func.expr(\"CASE WHEN fl_has_reception = '0' or fl_has_reception = 'false' THEN 'No' \" + \n \"WHEN fl_has_reception = '1' or fl_has_reception = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\"))\n\ntabla = tabla.withColumn(\"fl_has_air_conditioner\", func.expr(\"CASE WHEN airConditioner = '0' or airConditioner = 'false' THEN 'No' \" + \n \"WHEN airConditioner = '1' or airConditioner = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('airConditioner')\n \ntabla = tabla.withColumn(\"fl_is_property_offer\", func.expr(\"CASE WHEN fl_property_offer = '0' or fl_property_offer = 'false' THEN 'No' \" + \n \"WHEN fl_property_offer = '1' or fl_property_offer = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\")).drop('fl_property_offer')\n\ntabla = tabla.withColumn(\"fl_has_social_room\", func.expr(\"CASE WHEN fl_has_social_room = '0' or fl_has_social_room = 'false' THEN 'No' \" + \n \"WHEN fl_has_social_room = '1' or fl_has_social_room = 'true' THEN 'Si' \" +\n \"ELSE 'No' END\"))\n\ntabla = tabla.withColumn(\"fl_has_home_appliances\", func.expr(\"CASE WHEN homeAppliances is null or homeAppliances = '0' THEN 'No' \" + \n \"WHEN homeAppliances = '1' THEN 'Si' \" +\n \"ELSE 'Si' END\")).drop('homeAppliances')\n\ntabla = tabla.withColumn('dt_product_date_time', func.current_timestamp()).withColumn('dt_product_hudi_date_time', func.current_timestamp())\n\ntabla = tabla.withColumn('dt_creation_date', func.to_timestamp('dt_creation_date', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_modification_date', func.to_timestamp('dt_modification_date', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_property_inception_date', func.to_timestamp('dt_property_inception_date', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_property_expiration_date', func.to_timestamp('dt_property_expiration_date', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_property_deletion_date', func.to_timestamp('dt_property_deletion_date', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_property_project_delivery_date', func.to_timestamp('dt_property_project_delivery_date', 'yyyy-MM-ddHH:mm:ss.SSSZ' ))\n\ntabla = tabla.withColumn('dt_product_date_time', func.to_timestamp('dt_product_date_time', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('dt_product_hudi_date_time', func.to_timestamp('dt_product_hudi_date_time', 'yyyy-MM-ddHH:mm:ss.SSSZ'))\ntabla = tabla.withColumn('nm_publication_days_since_posted', func.datediff(func.current_date(),func.col(\"dt_creation_date\")))\n\ntable_pp = tabla.select('id_publication','ds_publication_status','ds_publication_type','ds_publication_site_store',\n func.col('id_publication_owner_app_identification_number').cast('String').alias('id_publication_owner_app_identification_number'),\n 'ds_publication_owner_email','ds_property_real_state_registration_number','ds_property_type','id_property_project','ds_property_project_name',\n 'ds_property_project_status','id_property_project_building_class','ds_property_project_building_class_name','ds_property_project_building_class_status',\n 
'fl_is_property_offer','cd_state_dane_code','ds_state_name','cd_city_dane_code','ds_city_name','ds_neighborhood','ds_district_division',\n                        'cd_neighborhood_economic_level','ds_address','ds_latitude','ds_longitude',\n                        func.col('nm_property_selling_price').cast('Decimal').alias('nm_property_selling_price'),\n                        func.col('nm_property_monthly_rent_payment').cast('Decimal').alias('nm_property_monthly_rent_payment'),\n                        func.col('nm_property_monthly_administration_fee').cast('Decimal').alias('nm_property_monthly_administration_fee'),\n                        func.col('nm_square_meter_price').cast('Decimal').alias('nm_square_meter_price'),\n                        func.col('nm_built_area').cast('Float').alias('nm_built_area'),\n                        func.col('nm_private_area').cast('Float').alias('nm_private_area'),\n                        func.col('nm_parking_number').cast('Integer').alias('nm_parking_number'),\n                        func.col('nm_visitors_parking_number').cast('Integer').alias('nm_visitors_parking_number'),\n                        func.col('nm_bedroom_number').cast('Integer').alias('nm_bedroom_number'),\n                        func.col('nm_bathroom_number').cast('Integer').alias('nm_bathroom_number'),\n                        func.col('nm_property_age').cast('Integer').alias('nm_property_age'),\n                        func.col('nm_balcony_number').cast('Integer').alias('nm_balcony_number'),\n                        func.col('nm_terrace_number').cast('Integer').alias('nm_terrace_number'),\n                        'fl_is_pet_allowed','fl_has_laundry_area','fl_has_green_zones',\n                        func.col('nm_elevator_number').cast('Integer').alias('nm_elevator_number'),\n                        'fl_has_communal_living','fl_has_children_zone','fl_has_pool','fl_has_gym',\n                        'fl_has_service_room','fl_has_service_bathroom','fl_has_social_room','fl_has_reception',\n                        func.col('nm_storage_room_number').cast('Integer').alias('nm_storage_room_number'),\n                        'fl_has_air_conditioner','fl_has_home_appliances','fl_has_vigilance',\n                        func.col('nm_publication_days_since_posted').cast('Integer').alias('nm_publication_days_since_posted'),\n                        'dt_property_project_delivery_date',\n                        'tx_url',\n                        'dt_creation_date',\n                        'dt_modification_date',\n                        'dt_property_inception_date',\n                        'dt_property_expiration_date',\n                        'dt_property_deletion_date',\n                        'dt_product_date_time',\n                        'dt_product_hudi_date_time')\n\ntable_pp = table_pp.fillna(value=0,subset=['nm_parking_number']).fillna(value=0,subset=['nm_visitors_parking_number'])\\\n    .fillna(value=0,subset=['nm_bedroom_number']).fillna(value=0,subset=['nm_bathroom_number']).fillna(value=0,subset=['nm_balcony_number'])\\\n    .fillna(value=0,subset=['nm_terrace_number']).fillna(value=0,subset=['nm_elevator_number']).fillna(value=0,subset=['nm_storage_room_number'])\n\ntable_pp = table_pp.distinct()\ntable_pp = table_pp.na.drop(subset=['id_publication','ds_publication_type','ds_publication_site_store','id_publication_owner_app_identification_number','ds_publication_owner_email','ds_state_name','ds_city_name'])\n\n# schema for the final output (StructType/StructField come from pyspark.sql.types, not pyspark.sql.functions)\nschema = StructType([\n        StructField('id_publication', StringType(), False),\n        StructField('ds_publication_status', StringType(), True),\n        StructField('ds_publication_type', StringType(), False),\n        StructField('ds_publication_site_store', StringType(), False),\n        StructField('id_publication_owner_app_identification_number', StringType(), False),\n        StructField('ds_publication_owner_email', StringType(), False),\n        StructField('ds_property_real_state_registration_number', StringType(), True),\n        StructField('ds_property_type', StringType(), True), \n        StructField('id_property_project', StringType(), True),\n        StructField('ds_property_project_name', StringType(), True),\n        StructField('ds_property_project_status', StringType(), True),\n        
StructField('id_property_project_building_class', StringType(), True),\n StructField('ds_property_project_building_class_name', StringType(), True),\n StructField('ds_property_project_building_class_status', StringType(), True),\n StructField('fl_is_property_offer', StringType(), True),\n StructField('cd_state_dane_code', StringType(), False),\n StructField('ds_state_name', StringType(), False),\n StructField('cd_city_dane_code', StringType(), False),\n StructField('ds_city_name', StringType(), False),\n StructField('ds_neighborhood', StringType(), True),\n StructField('ds_district_division', StringType(), True),\n StructField('cd_neighborhood_economic_level', StringType(), True),\n StructField('ds_address', StringType(), True),\n StructField('ds_latitude', StringType(), True),\n StructField('ds_longitude', StringType(), True),\n StructField('nm_property_selling_price', DecimalType(18,2), False),\n StructField('nm_property_monthly_rent_payment', DecimalType(18,2), False),\n StructField('nm_property_monthly_administration_fee', DecimalType(18,2), False),\n StructField('nm_square_meter_price', DecimalType(18,2), False),\n StructField('nm_built_area', FloatType(), False),\n StructField('nm_private_area', FloatType(), False), \n StructField('nm_parking_number', IntegerType(), True),\n StructField('nm_visitors_parking_number', IntegerType(), True),\n StructField('nm_bedroom_number', IntegerType(), True),\n StructField('nm_bathroom_number', IntegerType(), True),\n StructField('nm_property_age', IntegerType(), True),\n StructField('nm_balcony_number', IntegerType(), True),\n StructField('nm_terrace_number', IntegerType(), True), \n StructField('fl_is_pet_allowed', StringType(), True),\n StructField('fl_has_laundry_area', StringType(), True),\n StructField('fl_has_green_zones', StringType(), True),\n StructField('nm_elevator_number', IntegerType(), True),\n StructField('fl_has_communal_living', StringType(), True),\n StructField('fl_has_children_zone', StringType(), True),\n StructField('fl_has_pool', StringType(), True),\n StructField('fl_has_gym', StringType(), True),\n StructField('fl_has_service_room', StringType(), True),\n StructField('fl_has_service_bathroom', StringType(), True),\n StructField('fl_has_social_room', StringType(), True),\n StructField('fl_has_reception', StringType(), True),\n StructField('nm_storage_room_number', IntegerType(), True), \n StructField('fl_has_air_conditioner', StringType(), True),\n StructField('fl_has_home_appliances', StringType(), True), \n StructField('fl_has_vigilance', StringType(), True),\n StructField('nm_publication_days_since_posted', IntegerType(), True), \n StructField('dt_property_project_delivery_date', TimestampType(), True),\n StructField('tx_url', StringType(), True),\n StructField('dt_creation_date', TimestampType(), False),\n StructField('dt_modification_date', TimestampType(), True),\n StructField('dt_property_inception_date', TimestampType(), True),\n StructField('dt_property_expiration_date', TimestampType(), True),\n StructField('dt_property_deletion_date', TimestampType(), True),\n StructField('dt_product_date_time', TimestampType(), True),\n StructField('dt_product_hudi_date_time', TimestampType(), True)\n ])\n\nemptyRDD = spark.sparkContext.emptyRDD()\nproperties_projects = spark.createDataFrame(emptyRDD, schema) \nproperties_projects = properties_projects.union(table_pp)\n\nciencuadras_products_properties_and_projects_glue_tb = DynamicFrame.fromDF(properties_projects, glueContext, 
\"ciencuadras_products_properties_and_projects_glue_tb\")\n\ngl2.upsert_hudi_table(\n spark_dyf = ciencuadras_products_properties_and_projects_glue_tb,\n glue_database = f\"{dbName}\",\n table_name = \"ciencuadras_products_properties_and_projects_glue_tb\",\n record_id = 'id_publication',\n precomb_key = 'dt_product_hudi_date_time',\n overwrite_precomb_key = True,\n target_path = f\"s3://{targetBucketName}/{route}/{dbName}/{prefixTable}properties_and_projects{suffixTable}/\",\n)","repo_name":"dfnietop/terraform","sub_path":"files/glueAssets/glueScripts/producto_inmueble.py","file_name":"producto_inmueble.py","file_ext":"py","file_size_in_byte":42787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26463580866","text":"import dash\nfrom dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output\nfrom PySide6.QtCore import QObject, Signal\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\nfrom werkzeug.serving import run_simple\nimport tools.afcCalculationV2 as afcCalculation\nimport plotly_express as px\nimport pandas as pd\nfrom flask import request\n\n\n\nclass Server(QObject):\n shut_down_signal = Signal()\n\n def __init__(self, port, filepath):\n super().__init__()\n self.port = port\n self.filepath = filepath\n self.out_recent = pd.DataFrame\n self.out_far = pd.DataFrame\n\n # external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n # self.app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n self.app = dash.Dash(__name__)\n self.appSetLayout()\n\n def run(self):\n self.app.run_server(debug=False, port=self.port)\n # run_simple('localhost', port=self.port, application=DispatcherMiddleware(self.app.server))\n print(\"服务器已运行\")\n\n def appSetLayout(self):\n fig = self.generateGraphics()\n shut_down_signal = self.shut_down_signal\n\n self.app.layout = html.Div([\n # # represents the URL bar, doesn't render anything\n dcc.Location(id='url', refresh=False),\n html.Div(id='page-content'),\n dcc.Graph(figure=fig['近期进站清单']),\n dcc.Graph(figure=fig['远期进站清单']),\n ])\n\n @self.app.callback(dash.dependencies.Output('page-content', 'children'), [dash.dependencies.Input('url', 'pathname')])\n def display_page(pathname):\n if pathname == '/shutdown':\n shutdown()\n shut_down_signal.emit()\n return html.Div([\n html.H5('视图界面')\n ])\n\n def shutdown():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n def generateGraphics(self):\n \"\"\"\n 获取要展示的图表\n\n :return:\n \"\"\"\n filepath = self.filepath\n\n # 提取近期远期以及手动调整表格的内容\n para, ridershipAddress = afcCalculation.loadParam(filepath)\n table_recent = afcCalculation.AFC_project(file_path=ridershipAddress, tab_names=[\"近期早高峰客流\", \"近期晚高峰客流\"], parameter_list=para).run()\n table_far = afcCalculation.AFC_project(file_path=ridershipAddress, tab_names=[\"远期早高峰客流\", \"远期晚高峰客流\"], parameter_list=para).run()\n manual_df_recent = afcCalculation.get_dataFrame('output_recent', filepath)\n if manual_df_recent is None:\n manual_df_recent = table_recent[3].copy()\n\n manual_df_far = afcCalculation.get_dataFrame('output_far', filepath)\n if manual_df_far is None:\n manual_df_far = table_far[3].copy()\n\n d0 = table_recent[0].index.to_series()\n d1 = table_recent[1]['MAX(C进)']\n d2 = table_recent[2]['进站检票机\\n(MAX)']\n d3 = table_recent[3]['进站检票机']\n d4 = manual_df_recent['进站检票机']\n self.out_recent: pd.Dataframe = pd.concat([d0, d1, d2, d3, 
d4], axis=1, join=\"outer\")\n        self.out_recent.columns = [\"车站名\", \"MAX(C进)\", \"进站检票机\\n(MAX)\", \"进站检票机(计算值)\", \"进站检票机(提资值)\"]\n\n        d0 = table_far[0].index.to_series()\n        d1 = table_far[1]['MAX(C进)']\n        d2 = table_far[2]['进站检票机\\n(MAX)']\n        d3 = table_far[3]['进站检票机']\n        d4 = manual_df_far['进站检票机']\n        self.out_far: pd.DataFrame = pd.concat([d0, d1, d2, d3, d4], axis=1, join=\"outer\")\n        self.out_far.columns = [\"车站名\", \"MAX(C进)\", \"进站检票机\\n(MAX)\", \"进站检票机(计算值)\", \"进站检票机(提资值)\"]\n\n        fig1 = px.parallel_categories(self.out_recent)\n        fig2 = px.parallel_categories(self.out_far)\n\n        fig = {\n            '近期进站清单': fig1,\n            '远期进站清单': fig2,\n        }\n\n        return fig\n\nif __name__ == \"__main__\":\n    server = Server(8085, \"/天津1号线/测试项目01.afc\")\n    server.run()","repo_name":"pc007007/ZDHCal","sub_path":"widgets/module/afc/contentWidget_afc/dashApp.py","file_name":"dashApp.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"42015624882","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Twitter Sentiment Analysis\n\n# ## Loading Libraries and Data\n\n# In[2]:\n\n\nimport re # for regular expressions\nimport pandas as pd \npd.set_option(\"display.max_colwidth\", 200)\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport seaborn as sns\nimport string\nimport nltk # for text manipulation\nimport warnings \nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[3]:\n\n\ntrain = pd.read_csv(r'C:\\Users\\user\\Desktop\\Data\\train_E6oV3lV.csv') \ntest = pd.read_csv(r'C:\\Users\\user\\Desktop\\Data\\test_tweets_anuFYb8.csv')\n\n\n# # Text PreProcessing and Cleaning\n\n# #### Data Inspection\n\n# In[4]:\n\n\n##non racist/sexist tweets.\ntrain[train['label'] == 0].head(10)\n\n\n# In[5]:\n\n\n##racist/sexist tweets\ntrain[train['label'] == 1].head(10)\n\n\n# In[6]:\n\n\ntrain.shape, test.shape ##dimensions of the train and test dataset.\n\n\n# In[7]:\n\n\ntrain[\"label\"].value_counts() ##label-distribution in the train dataset.\n\n\n# In[8]:\n\n\ntemp = train.groupby('label').count()['id'].reset_index().sort_values(by='id',ascending=False)\ntemp.style.background_gradient(cmap='Purples')\n\n\n# In[9]:\n\n\n##distribution of length of the tweets, in terms of words, in both train and test data.\n\nlength_train = train['tweet'].str.len()\nlength_test = test['tweet'].str.len()\n\nplt.hist(length_train, bins=20, label=\"train_tweets\")\nplt.hist(length_test, bins=20, label=\"test_tweets\")\nplt.legend()\nplt.show()\n\n\n# In[10]:\n\n\nimport seaborn as sns\nplt.figure(figsize=(12,6))\nsns.countplot(x='label',data=train)\n\n\n# In[11]:\n\n\n##funnel-chart\nfrom plotly import graph_objs as go\n\nfig = go.Figure(go.Funnelarea(\n    text =temp.label,\n    values = temp.id,\n    title = {\"position\": \"top center\", \"text\": \"Funnel-Chart of Sentiment Distribution\"}\n    ))\nfig.show()\n\n\n# ## Data Cleaning\n\n# In[13]:\n\n\ncombi = train.append(test, ignore_index=True)\ncombi.shape\n\n\n# In[14]:\n\n\n##user-defined function to remove unwanted text patterns from the tweets.\n\ndef remove_pattern(input_txt, pattern):\n    r = re.findall(pattern, input_txt)\n    for i in r:\n        input_txt = re.sub(i, '', input_txt)\n        \n    return input_txt\n\n\n# ### 1. Removing Twitter Handles (@user)\n\n# In[15]:\n\n\ncombi['tidy_tweet'] = np.vectorize(remove_pattern)(combi['tweet'], \"@[\\w]*\") \ncombi.head()\n\n\n# ### 2. 
Removing Punctuations, Numbers, and Special Characters\n\n# In[16]:\n\n\ncombi['tidy_tweet'] = combi['tidy_tweet'].str.replace(\"[^a-zA-Z#]\", \" \")\ncombi.head(10)\n\n\n# ### 3. Removing Short Words\n\n# In[17]:\n\n\ncombi['tidy_tweet'] = combi['tidy_tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))\n\n\n# In[18]:\n\n\ncombi.head()\n\n\n# ### 4. Text Normalization\n\n# In[19]:\n\n\ntokenized_tweet = combi['tidy_tweet'].apply(lambda x: x.split()) # tokenizing\ntokenized_tweet.head()\n\n\n# ###### normalize the tokenized tweets.\n\n# In[20]:\n\n\nfrom nltk.stem.porter import *\nstemmer = PorterStemmer()\n\ntokenized_tweet = tokenized_tweet.apply(lambda x: [stemmer.stem(i) for i in x]) # stemming\nprint(tokenized_tweet)\n\n\n# ###### stitch these tokens back together.\n\n# In[21]:\n\n\nfor i in range(len(tokenized_tweet)):\n tokenized_tweet[i] = ' '.join(tokenized_tweet[i])\n \ncombi['tidy_tweet'] = tokenized_tweet\n\n\n# In[22]:\n\n\nprint(tokenized_tweet)\n\n\n# In[ ]:\n\n\n\n\n\n# ## Visualization from Tweets\n\n# ### A) Understanding the common words used in the tweets: WordCloud\n\n# In[23]:\n\n\nall_words = ' '.join([text for text in combi['tidy_tweet']])\nfrom wordcloud import WordCloud\nwordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(all_words)\n\nplt.figure(figsize=(10, 7))\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis('off')\nplt.show()\n\n\n# ### B) Words in non racist/sexist tweets\n\n# In[24]:\n\n\nnormal_words =' '.join([text for text in combi['tidy_tweet'][combi['label'] == 0]])\n\nwordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(normal_words)\nplt.figure(figsize=(10, 7))\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis('off')\nplt.show()\n\n\n# ### C) Racist/Sexist Tweets\n\n# In[25]:\n\n\nnegative_words = ' '.join([text for text in combi['tidy_tweet'][combi['label'] == 1]])\nwordcloud = WordCloud(width=800, height=500,\nrandom_state=21, max_font_size=110).generate(negative_words)\nplt.figure(figsize=(10, 7))\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis('off')\nplt.show()\n\n\n# ### D) Understanding the impact of Hashtags on tweets sentiment\n\n# In[29]:\n\n\n# function to collect hashtags\ndef hashtag_extract(x):\n hashtags = []\n # Loop over the words in the tweet\n for i in x:\n ht = re.findall(r\"#(\\w+)\", i)\n hashtags.append(ht)\n\n return hashtags\n\n\n# In[30]:\n\n\n# extracting hashtags from non racist/sexist tweets\n\nHT_regular = hashtag_extract(combi['tidy_tweet'][combi['label'] == 0])\n\n# extracting hashtags from racist/sexist tweets\nHT_negative = hashtag_extract(combi['tidy_tweet'][combi['label'] == 1])\n\n# unnesting list\nHT_regular = sum(HT_regular,[])\nHT_negative = sum(HT_negative,[])\n\n\n# ### Non-Racist/Sexist Tweets\n\n# In[31]:\n\n\na = nltk.FreqDist(HT_regular)\nd = pd.DataFrame({'Hashtag': list(a.keys()),\n 'Count': list(a.values())})\n\n# selecting top 20 most frequent hashtags \nd = d.nlargest(columns=\"Count\", n = 20) \nplt.figure(figsize=(16,5))\nax = sns.barplot(data=d, x= \"Hashtag\", y = \"Count\")\nax.set(ylabel = 'Count')\nplt.show()\n\n\n# In[32]:\n\n\nimport plotly.express as px\nfig = px.treemap(d, path=['Hashtag'], values='Count',title='Tree of Positive Words')\nfig.show()\n\n\n# ### Racist/Sexist Tweets\n\n# In[33]:\n\n\n\nb = nltk.FreqDist(HT_negative)\ne = pd.DataFrame({'Hashtag': list(b.keys()), 'Count': list(b.values())})\n\n# selecting top 20 most frequent hashtags\ne = 
e.nlargest(columns=\"Count\", n = 20) \nplt.figure(figsize=(16,5))\nax = sns.barplot(data=e, x= \"Hashtag\", y = \"Count\")\n\n\n# In[34]:\n\n\nimport plotly.express as px\nfig = px.treemap(e, path=['Hashtag'], values='Count',title='Tree of Negative Words')\nfig.show()\n\n\n# ## Word Embeddings\n\n# ##### Word2Vec Embeddings\n\n# In[36]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nimport gensim\n\ntokenized_tweet = combi['tidy_tweet'].apply(lambda x: x.split()) # tokenizing\n\nmodel_w2v = gensim.models.Word2Vec(\n tokenized_tweet,\n size=200, # desired no. of features/independent variables \n window=5, # context window size\n min_count=2,\n sg = 1, # 1 for skip-gram model\n hs = 0,\n negative = 10, # for negative sampling\n workers= 2, # no.of cores\n seed = 34)\n\nmodel_w2v.train(tokenized_tweet, total_examples= len(combi['tidy_tweet']), epochs=20)\n\n\n# In[38]:\n\n\nmodel_w2v.wv.most_similar(positive=\"dinner\")\n\n\n# In[39]:\n\n\nmodel_w2v.wv.most_similar(positive=\"trump\")\n\n\n# In[43]:\n\n\nmodel_w2v.doesnt_match('breakfast cereal dinner lunch'.split())\n\n\n# In[40]:\n\n\nmodel_w2v['food']\n\n\n# In[41]:\n\n\nlen(model_w2v['food']) #The length of the vector is 200\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"tanvi-jadhav/Twitter_Sentiment_Analysis","sub_path":"TSA.py","file_name":"TSA.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27739665909","text":"import os\nimport shutil\nimport tempfile\nimport zipfile\nimport ConfigParser\nimport logging\nfrom fnmatch import fnmatch\n\nfrom sugar.activity import activity\nfrom sugar.bundle import activitybundle\nfrom sugar.datastore import datastore\nfrom sugar import profile\n\nDOMAIN_PREFIX = 'org.sugarlabs.ssb'\n\nIGNORE_DIRS = ['dist', '.git']\nIGNORE_FILES = ['.gitignore', 'MANIFEST', '*.pyc', '*~', '*.bak', \n 'pseudo.po', '.DS_STORE']\n\ndef get_is_ssb(activity):\n '''determine if the activity is an SSB'''\n return activity.get_bundle_id().startswith(DOMAIN_PREFIX)\n \ndef copy_profile():\n '''get the data from the bundle and into the profile'''\n ssb_data_path = os.path.join(activity.get_bundle_path(), 'data/ssb_data')\n data_path = os.path.join(activity.get_activity_root(), 'data')\n\n if os.path.isdir(ssb_data_path):\n # we can't use shutil.copytree for the entire dir\n for i in os.listdir(ssb_data_path):\n src = os.path.join(ssb_data_path, i)\n dst = os.path.join(data_path, i)\n if not os.path.exists(dst):\n if os.path.isdir(src):\n shutil.copytree(src, dst)\n else: # is there a better way?\n shutil.copy(src, dst)\n\ndef list_files(base_dir, ignore_dirs=None, ignore_files=None):\n '''from bundlebuilder.py'''\n result = []\n\n base_dir = os.path.abspath(base_dir)\n\n for root, dirs, files in os.walk(base_dir):\n if ignore_files:\n for pattern in ignore_files:\n files = [f for f in files if not fnmatch(f, pattern)]\n\n rel_path = root[len(base_dir) + 1:]\n for f in files:\n result.append(os.path.join(rel_path, f))\n\n if ignore_dirs and root == base_dir:\n for ignore in ignore_dirs:\n if ignore in dirs:\n dirs.remove(ignore)\n\n return result\n\ndef remove_paths(paths, root=None):\n '''remove all paths in the list, fail silently'''\n if root is not None:\n paths = [os.path.join(root, i) for i in paths]\n \n for path in paths:\n try:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n except OSError:\n logging.warning('failed to remove: ' + path)\n\nclass 
SSBCreator(object):\n def __init__(self, title, uri):\n self.title = title\n self.name = title.replace(' ', '')\n self.uri = uri\n self.bundle_id = '%s.%sActivity' % (DOMAIN_PREFIX, self.name) \n \n self.bundle_path = activity.get_bundle_path()\n self.data_path = os.path.join(activity.get_activity_root(), 'data')\n self.temp_path = tempfile.mkdtemp() # make sure there's no collisions\n self.ssb_path = os.path.join(self.temp_path, self.name + '.activity')\n \n def __del__(self):\n '''clean up after ourselves, fail silently'''\n shutil.rmtree(self.temp_path, ignore_errors=True)\n \n def change_info(self):\n '''change the .info file accordingly'''\n path = os.path.join(self.ssb_path, 'activity/activity.info')\n \n config = ConfigParser.RawConfigParser()\n config.read(path)\n\n if config.get('Activity', 'name') == 'Browse':\n version = 1\n else:\n version = int(config.get('Activity', 'activity_version')) + 1\n\n config.set('Activity', 'activity_version', version) \n config.set('Activity', 'name', self.title)\n config.set('Activity', 'bundle_id', self.bundle_id)\n config.set('Activity', 'icon', 'activity-ssb')\n\n # write the changes\n f = open(path, 'w')\n config.write(f)\n f.close()\n \n def create(self):\n '''actual creation'''\n # copy the bundle\n shutil.copytree(self.bundle_path, self.ssb_path)\n \n self.change_info()\n \n # add the ssb icon\n shutil.copy(os.path.join(self.ssb_path, 'icons/activity-ssb.svg'),\n os.path.join(self.ssb_path, 'activity'))\n \n # set homepage\n f = open(os.path.join(self.ssb_path, 'data/homepage'), 'w')\n f.write(self.uri)\n f.close()\n\n # copy profile\n ssb_data_path = os.path.join(self.ssb_path, 'data/ssb_data')\n shutil.copytree(self.data_path, ssb_data_path)\n \n # delete undesirable things from the profile\n remove_paths(['Cache', 'cookies.sqlite', 'Google Gears for Firefox'],\n root=os.path.join(ssb_data_path, 'gecko'))\n\n # create MANIFEST\n files = list_files(self.ssb_path, IGNORE_DIRS, IGNORE_FILES)\n f = open(os.path.join(self.ssb_path, 'MANIFEST'), 'w')\n for i in files:\n f.write(i+'\\n')\n f.close()\n\n # create .xo bundle\n # include the manifest\n files.append('MANIFEST')\n\n self.xo_path = os.path.join(self.temp_path, self.name.lower() + '.xo')\n\n # zip everything\n xo = zipfile.ZipFile(self.xo_path, 'w', zipfile.ZIP_DEFLATED)\n for i in files:\n xo.write(os.path.join(self.ssb_path, i), \n os.path.join(self.name + '.activity', i))\n xo.close()\n \n def install(self):\n '''install the generated .xo bundle'''\n bundle = activitybundle.ActivityBundle(self.xo_path)\n bundle.install()\n \n def show_in_journal(self):\n '''send the generated .xo bundle to the journal'''\n jobject = datastore.create()\n jobject.metadata['title'] = self.title\n jobject.metadata['mime_type'] = 'application/vnd.olpc-sugar'\n jobject.metadata['icon-color'] = profile.get_color().to_string()\n jobject.file_path = self.xo_path\n \n datastore.write(jobject)\n \n activity.show_object_in_journal(jobject.object_id) ","repo_name":"lucian1900/Webified","sub_path":"ssb.py","file_name":"ssb.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73865993422","text":"import cv2\r\nimport numpy as np\r\n\r\ni = 1\r\nx1, y1 = 0, 0\r\nx2, y2 = 0, 0\r\nx3, y3 = 0, 0\r\n\r\nprint('Seleccione tres puntos de la imagen')\r\nprint('Puntos elegidos (x,y):')\r\n\r\n\r\ndef seleccionarPuntos(event, x, y, flags, params):\r\n global i, x1, y1, x2, y2, x3, y3\r\n if event == 
cv2.EVENT_LBUTTONDOWN:\r\n        if i == 1:\r\n            x1, y1 = x, y\r\n            cv2.circle(imagen1, (x1, y1), 2, (0, 0, 255), -1)\r\n            i += 1\r\n            print('Punto 1:', x1, y1)\r\n        elif i == 2:\r\n            x2, y2 = x, y\r\n            cv2.circle(imagen1, (x2, y2), 2, (0, 0, 255), -1)\r\n            i += 1\r\n            print('Punto 2:', x2, y2)\r\n        elif i == 3:\r\n            x3, y3 = x, y\r\n            cv2.circle(imagen1, (x3, y3), 2, (0, 0, 255), -1)\r\n            i += 1\r\n            print('Punto 3:', x3, y3)\r\n\r\n\r\nimagen1 = cv2.imread('gamer.jpg', cv2.IMREAD_COLOR)\r\nimagen2 = cv2.imread(\"meme.jpg\", cv2.IMREAD_COLOR)\r\ncv2.namedWindow('Imagen original')\r\ncv2.setMouseCallback('Imagen original', seleccionarPuntos)\r\n\r\nwhile 1:\r\n    cv2.imshow('Imagen original', imagen1)\r\n    if i == 4:\r\n        # src = coordinates of the points in the source image.\r\n        src = np.float32([[0, 0], [imagen2.shape[1], 0], [0, imagen2.shape[0]]])\r\n        # dst = coordinates of the points in the final image.\r\n        dst = np.float32([[x1, y1], [x2, y2], [x3, y3]])\r\n        # Get the matrix for the transformation.\r\n        matriz = cv2.getAffineTransform(src, dst)\r\n        # Apply the affine transformation.\r\n        incrustada = cv2.warpAffine(imagen2, matriz, (imagen1.shape[1], imagen1.shape[0]))\r\n        # Create the mask and invert it.\r\n        hsv = cv2.cvtColor(incrustada, cv2.COLOR_BGR2GRAY)\r\n        ret, mask = cv2.threshold(hsv, 10, 255, cv2.THRESH_BINARY)\r\n        maskInv = cv2.bitwise_not(mask)\r\n        # Build the final image.\r\n        imagenEnmascarada = cv2.bitwise_and(imagen1, imagen1, mask=maskInv)\r\n        incrustarEnmascarada = cv2.bitwise_and(incrustada, incrustada, mask=mask)\r\n        imagenFinal = cv2.add(imagenEnmascarada, incrustarEnmascarada)\r\n        cv2.imshow('Imagen final', imagenFinal)\r\n    if cv2.waitKey(1) & 0xFF == 27:\r\n        break\r\ncv2.destroyAllWindows()\r\n","repo_name":"bianchi017/Vision-por-computadora-TPs","sub_path":"TP8.py","file_name":"TP8.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11727284882","text":"from django.utils import timezone\nfrom .models import *\nfrom django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserEditForm, ProfileEditForm, UserRegistrationForm, activationForm, LoginForm\nfrom django.db.models import Sum\nfrom shop.views import * #added\nfrom shop.models import Product\nfrom cart.cart import Cart\nfrom shop.forms import ProductForm\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Profile\nfrom geopy import Nominatim\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\n\nnow = timezone.now()\ndef home(request):\n    return render(request, 'portfolio/home.html',\n                  {'portfolio': home})\n\n\ndef sendConfimationEmail(request):\n    profile = Profile.objects.all().filter(user=request.user)[0]\n    send_mail('Registration Successful @ Onspar', 'Hello, Thank you for registering with Onspar.\\n\\n\\n Please confirm activation using the token:'+profile.activation_token, 'no-reply@onspar.com', [request.user.email,])\n    return render(request, 'portfolio/emailSent.html')\n\ndef activation(request):\n    if request.method == 'POST':\n        form = activationForm(request.POST)\n        if form.is_valid():\n            cd = form.cleaned_data\n            profile = Profile.objects.all().filter(user=request.user)[0]\n            entered_token = cd['entered_token']\n            if entered_token == profile.activation_token:\n                
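# the submitted token matches the stored one, so activate the account\n                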
Profile.objects.all().filter(user=request.user).update(activated=True)\n if profile.profileFilled:\n return render(request, 'portfolio/home.html')\n else:\n return redirect('portfolio:fillProfile')\n else:\n form = activationForm()\n return render(request, 'portfolio/activationPage.html', {'form': form})\n else:\n form = activationForm()\n return render(request, 'portfolio/activationPage.html', {'form': form})\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n new_user = authenticate(username=cd['username'],password=cd['password'])\n if new_user is not None:\n if new_user.is_active:\n login(request, new_user)\n profile = Profile.objects.all().filter(user=request.user)[0]\n if profile.activated:\n if profile.profileFilled:\n return redirect('portfolio:home')\n else:\n return redirect('portfolio:fillProfile')\n else:\n return redirect('portfolio:activation')\n else:\n return HttpResponse('Disabled account')\n else:\n return HttpResponse('Invalid login')\n else:\n form = LoginForm()\n return render(request, 'portfolio/login.html', {'form':form})\n\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create a new user object but avoid saving it yet\n new_user = user_form.save(commit=False)\n # Set the chosen password\n new_user.set_password(\n user_form.cleaned_data['password'])\n # Save the User object\n new_user.save()\n profile = Profile.objects.create(user=new_user)\n send_mail('Registration Successful @ Onspar', 'Hello, Thank you for registering with Onspar. Please confirm activation using the token:'+profile.activation_token, 'no-reply@onspar.com', [new_user.email,])\n return render(request,\n 'account/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request,\n 'account/register.html',\n {'user_form': user_form})\ndef employee(request):\n products = Product.objects.filter(available=True)\n return render(request, 'portfolio/admin.html', {'products': products})\n\ndef notifications(request):\n products = Product.objects.all()\n requireRestock = []\n for product in products:\n if (product.stock <= 20):\n requireRestock.append(product)\n return render(request,'portfolio/notifications.html',{'notifications': requireRestock})\n\n\n@login_required\ndef myProfile(request):\n my_profile = Profile.objects.all().filter(user=request.user)\n if len(my_profile) > 0 and my_profile[0].profileFilled:\n geolocator = Nominatim()\n location = geolocator.geocode(str(my_profile[0].address)+\", \"+str(my_profile[0].city))\n return render(request,\n 'portfolio/myProfile.html',\n {'user': request.user,\n 'profile': my_profile[0],\n 'lat': location.latitude,\n 'long': location.longitude,\n 'loc': str(my_profile[0].address)+\", \"+str(my_profile[0].city)})\n\n else:\n my_profile = Profile.objects.all().filter(user=request.user)\n if len(my_profile) == 0:\n profile = Profile.objects.create(user=request.user)\n return redirect('portfolio:fillProfile')\n\n\n@login_required\ndef fillProfile(request):\n if request.method == 'POST':\n profile = Profile.objects.all().filter(user=request.user)[0]\n user_form = UserEditForm(instance=request.user,data=request.POST)\n profile_form = ProfileEditForm(instance=request.user.profile,\n data=request.POST,\n files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n 
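# both forms are valid and saved; mark the profile as complete\n            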
Profile.objects.all().filter(user=request.user).update(profileFilled=True)\n return redirect('portfolio:home')\n\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(\n instance=request.user.profile)\n return render(request,\n 'portfolio/fillProfile.html',\n {'user_form': user_form,\n\n 'profile_form': profile_form})\n\n\n\n@login_required\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user,data=request.POST)\n profile_form = ProfileEditForm(instance=request.user.profile,\n data=request.POST,\n files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(\n instance=request.user.profile)\n return render(request,\n 'portfolio/editProfile.html',\n {'user_form': user_form,\n 'profile_form': profile_form})\n\n\n\n\n@login_required\ndef employee_product_edit(request, pk):\n product = get_object_or_404(Product, pk=pk)\n print(\"I am here\")\n if request.method == \"POST\":\n # update\n form = ProductForm(request.POST, instance=product)\n if form.is_valid():\n product = form.save(commit=False)\n product.updated = timezone.now()\n product.save()\n products = Product.objects.filter(available=True)\n return render(request, 'portfolio/admin.html', {'products': products})\n else:\n # edit\n print(\"I am here\")\n form = ProductForm(instance=product)\n return render(request, 'portfolio/product_update.html', {'form': form})\n\n\n@login_required\ndef product_new(request):\n if request.method == \"POST\":\n form = ProductForm(request.POST)\n if form.is_valid():\n product = form.save(commit=False)\n product.created = timezone.now()\n product.save()\n products = Product.objects.filter(available=True)\n return render(request, 'portfolio/admin.html',\n {'products': products})\n else:\n form = ProductForm()\n return render(request, 'portfolio/product_add.html', {'form': form})\n\n\n\n@login_required\ndef employee_product_delete(request, pk):\n product = get_object_or_404(Product, pk=pk)\n product.delete()\n return redirect('portfolio:employee_view')\n","repo_name":"onsparproject/8380Team5ProjectCodeRepo","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20872017748","text":"# coding: utf-8\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core import signing\n\nfrom utils import string_template\nfrom campaigns.models import Campaign, CampaignLocationShift\nfrom .models import Volunteer\n\n\nTEMPLATE = u'''{% extends \"campaigns/base.html\" %}\n{% block title %}Tere, {{ volunteer.name }}!{% endblock title %}\n\n{% block header %}\n
    <h1>Tere, {{ volunteer.name }}!</h1>\n{% endblock header %}\n\n{% block content %}\n    <p>Valitud vahetused:</p>\n    <ul>\n    {% for shift in volunteer.shifts %}\n        <li>{{ shift.detailed_info }}</li>\n    {% endfor %}\n    </ul>\n    <p>Käesolev info on saadetud ka sisestatud meiliaadressile.</p>\n    ${content}
\n{% endblock content %}\n'''\n\ndef volunteer_detail(request, key):\n try:\n campaign = Campaign.objects.get(is_active=True)\n except Campaign.DoesNotExist:\n return render(request, 'campaigns/no-active-campaign.html')\n\n data = signing.loads(key)\n volunteer = get_object_or_404(Volunteer, pk=data['pk'])\n\n context = {'volunteer': volunteer}\n content = string_template.render_campaign_registration_template(TEMPLATE,\n campaign, request, context)\n\n return HttpResponse(content)\n","repo_name":"mrts/foodbank-campaign","sub_path":"src/volunteers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8061411984","text":"# by Anissa and Pratyusha \nimport csv\nimport json\n\n#Read vegetables.csv into a variable called vegetables.\nwith open('vegetables.csv') as f:\n reader = csv.DictReader(f)\n rows = list(reader)\n vegetables = [dict(row) for row in rows] \n\n#Loop through vegetables and filter down \n#to only green vegtables using a whitelist.\n# set the filter to color = green\ngreen_vegetables = []\nfor veggie in vegetables:\n if veggie['color'] == 'green':\n green_vegetables.append(veggie)\n#Print veggies to the terminal\n#print(green_vegetables)\n#Write the veggies to a json file called greenveggies.json\nwith open('green_vegetables.json', 'w') as f:\n json.dump(green_vegetables, f, indent=2)\n# Bonus: Output another csv called green_vegetables.csv.","repo_name":"aabdeljelil/python-playground","sub_path":"filterveggies.py","file_name":"filterveggies.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3300289456","text":"import time\nimport yaml\nimport gym\nimport numpy as np\n\nfrom argparse import Namespace\nfrom matplotlib import pyplot as plt \n\nimport tensorflow as tf\nimport logging as log\n\nfrom tensorflow import keras\nfrom planner.purepursuit import PurePursuitPlanner\nfrom planner.astar import AStarPlanner\nfrom agent.DQN import NN,Agent,processing,make_state\n\nif __name__ == '__main__':\n\n work = {'mass': 3.463388126201571, 'lf': 0.15597534362552312, 'tlad': 0.82461887897713965, 'vgain': 0.90338203837889}\n with open('./obs_example/config_obs.yaml') as file:\n # with open('./obs_new_round/config_obs.yaml') as file:\n conf_dict = yaml.load(file, Loader=yaml.FullLoader)\n conf = Namespace(**conf_dict)\n episode = 1\n log.basicConfig(level=log.INFO)\n\n env = gym.make('f110_gym:f110-v0', map=conf.map_path, map_ext=conf.map_ext, num_agents=1)\n obs, step_reward, done, info = env.reset(np.array([[conf.sx, conf.sy, conf.stheta]]))\n ex_state = processing(obs)\n state_size = ex_state.shape[1]\n driver = Agent(state_size, test=True)\n agent = driver.load_model()\n rewards = []\n\n for i in range(episode):\n obs, step_reward, done, info = env.reset(np.array([[conf.sx, conf.sy, conf.stheta]]))\n\n env.render()\n planner = PurePursuitPlanner(conf, 0.17145+0.15875)\n\n laptime = 0.0\n start = time.time()\n speeds = [0]\n\n while not done:\n desire_obs = list()\n\n planner.load_laser_point(obs['scans'][0])\n planner.load_poses(obs['poses_x'][0], obs['poses_y'][0])\n planner.get_obstacle_trajectory()\n current_pose = [obs['poses_x'][0],obs['poses_y'][0]]\n current_wps = planner.current_waypoint\n log.info(f\"[current_wps]: {current_wps}\")\n log.info(f\"[current_pose]: {current_pose}\")\n astar_flag = planner.find_obstacle_between_wpts()\n # astar_flag = 
False\n\n            if astar_flag:\n                obs_cord = planner.shortest_obs_pose \n                log.info(f\"[obs_cord]: {obs_cord}\")\n                goal_idx = planner.set_goal(obs_cord)\n                log.info(f\"[i, i2]: {planner.i, planner.i2}\")\n                log.info(f\"[goal_idx]: {goal_idx}\")\n                goal_cord = planner.get_wpts_from_idx(planner.i2+3)\n                log.info(f\"[goal_cord]: {goal_cord}\")\n                step = 0\n\n                a = AStarPlanner(1, 0, show_animation= False)\n\n                _obs = {\n                    'x': int(obs_cord[0] * 10),\n                    'y': int(obs_cord[1] * 10)\n                }\n                _points = {\n                    'current': {\n                        'x': int(current_pose[0] * 10),\n                        'y': int(current_pose[1] * 10)\n                    },\n                    'future': {\n                        'x': int(goal_cord[0] * 10),\n                        'y': int(goal_cord[1] * 10)\n                    }\n                }\n\n                new_trac = a.plan(obstacle=_obs, waypoints=_points)\n\n                if str(type(new_trac)) == \"<class 'str'>\":\n                    log.warn(f\"{new_trac}\")\n                else:\n                    new_trac = np.array(new_trac)\n                    new_trac = make_state(new_trac)\n\n                # current driving observation state\n                current_obs = obs\n                current_state = processing(obs)\n\n                # perform the agent's action\n                action_num = np.argmax(agent.predict(current_state))\n                action = driver.action[action_num]\n\n                # next state after driving with the chosen speed and steer\n                next_obs, step_reward, done, info = env.step(np.array([action]))\n                next_state = processing(next_obs)\n            \n            else:\n                speed, steer = planner.plan(obs['poses_x'][0], obs['poses_y'][0], obs['poses_theta'][0], work['tlad'], work['vgain'])\n                speeds.append(speed)\n                print('speed,steer:',speed,steer)\n                # speed = 1.5\n                action = np.array([[steer, speed]])\n                obs, step_reward, done, info = env.step(np.array(action))\n            laptime += step_reward\n            env.render(mode='human')\n        # time.sleep(1000)\n        rewards.append(laptime)\n        print(laptime)\n        print('Sim elapsed time:', laptime, 'Real elapsed time:', time.time() - start)\n","repo_name":"zygn/Capstone_AD1","sub_path":"gym/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34663879154","text":"from inputs import *\nload('1181')\nN = int(input())\n\nwords = set()\n\nfor _ in range(N):\n    word = input()\n    count = len(word)\n    words.add((word, count))\n\nfor word, _ in sorted(words, key=lambda x: (x[1], x[0])):\n    print(word)","repo_name":"eianlee1124/daily-practice","sub_path":"BOJ/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34250926493","text":"from django import forms\r\nfrom blog.models import Post, Comment\r\n\r\n\r\nclass PostForm(forms.ModelForm):\r\n    class Meta():\r\n        model = Post\r\n        fields = ('author', 'title', 'text')\r\n\r\n        # connecting the specific fields to CSS,\r\n        widgets = {\r\n            'title': forms.TextInput(attrs={'class': 'textinputclass'}), # class are the css class\r\n            'text': forms.Textarea(attrs={\"class\": \"editable medium-editor-textarea postcontent\"}) #editable and medium-editor-textarea are the builtin class\r\n        }\r\n\r\nclass CommentForm(forms.ModelForm):\r\n    class Meta():\r\n        model = Comment\r\n        fields = (\"author\", \"text\")\r\n\r\n        widgets = {\r\n            'author': forms.TextInput(attrs={'class': 'textinputclass'}),\r\n            'text': forms.Textarea(attrs={\"class\": \"editable medium-editor-textarea postcontent\"}) #editable and medium-editor-textarea are the builtin class\r\n        }\r\n","repo_name":"kottalashiva/Python","sub_path":"Django/blog_project/mysite/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"11870002834","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\nimport unittest\n\nfrom decentra_network.lib.settings_system import save_settings\nfrom decentra_network.lib.settings_system import the_settings\nfrom decentra_network.lib.clean_up import CleanUp_tests\n\n\nclass Test_Settings(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n CleanUp_tests()\n\n def test_settings_by_creating_settings(self):\n temp_settings = the_settings()\n self.assertIsNotNone(temp_settings[\"test_mode\"],\n \"A problem on the test_mode.\")\n self.assertIsNotNone(temp_settings[\"debug_mode\"],\n \"A problem on the debug_mode.\")\n\n def test_settings_by_saving_and_getting_new_settings(self):\n backup_settings = the_settings()\n\n temp_settings = the_settings()\n\n temp_settings[\"test_mode\"] = True\n temp_settings[\"debug_mode\"] = True\n save_settings(temp_settings)\n\n temp_test_settings = the_settings()\n self.assertEqual(\n temp_test_settings[\"test_mode\"],\n True,\n \"A problem on the saving the settings.\",\n )\n self.assertEqual(\n temp_test_settings[\"debug_mode\"],\n True,\n \"A problem on the saving the settings.\",\n )\n\n temp_test_settings[\"test_mode\"] = False\n temp_test_settings[\"debug_mode\"] = False\n save_settings(temp_test_settings)\n\n temp_test_settings2 = the_settings()\n self.assertEqual(\n temp_test_settings2[\"test_mode\"],\n False,\n \"A problem on the saving the settings.\",\n )\n self.assertEqual(\n temp_test_settings2[\"debug_mode\"],\n False,\n \"A problem on the saving the settings.\",\n )\n\n temp_test_settings2[\"test_mode\"] = backup_settings[\"test_mode\"]\n temp_test_settings2[\"debug_mode\"] = backup_settings[\"debug_mode\"]\n save_settings(temp_test_settings2)\n\n\nunittest.main(exit=False)\n","repo_name":"GitHangar/Decentra-Network","sub_path":"tests/unit_tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"31126459218","text":"import csv\nimport datetime\nimport os\nimport prettytable\nimport pytz\nimport tzlocal\n\n\nhome = os.getenv('HOME')\ninput_file = home + \"/.poll-internet.sh.log\"\n\n\ndef decomment(csvfile):\n for row in csvfile:\n raw = row.split('#')[0].strip()\n if raw:\n yield raw\n\n\ndef convert_date(epoch):\n tz = tzlocal.get_localzone()\n dt = datetime.datetime.fromtimestamp(int(epoch), tz)\n return dt.strftime('%Y-%m-%d %H:%M:%S %Z%z')\n\n\ndef convert_duration(secs):\n secs = int(secs)\n return \"{:2d}d {:2d}h {:2d}m {:2d}s\".format(secs // 86400,\n secs % 86400 // 3600,\n secs % 3600 // 60,\n secs % 60)\n\n\ndef process_row(table, row):\n table.add_row([convert_date(row[0]),\n convert_date(row[1]),\n convert_duration(row[2])])\n\n\nx = prettytable.PrettyTable()\nx.field_names = [\"Outage Detected\", \"Outage End\", \"Outage Duration\"]\n\nwith open(input_file, mode='r') as csvfile:\n reader = csv.reader(decomment(csvfile))\n for row in reader:\n process_row(x, 
row)\n\nprint(x)\n","repo_name":"mrda/junkcode","sub_path":"report-outages.py","file_name":"report-outages.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"40240394493","text":"from nltk.sentiment import SentimentIntensityAnalyzer\nfrom short_forms import *\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import wordnet as wn, stopwords\nimport string\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nfrom nltk.corpus import wordnet as wn\n\n\n\ndef seperate(review):\n # sample = re.split(r' *[\\.\\?!][\\'\"\\)\\]]* *', sentence)\n # sample = re.split(r'[.,&]', sentence)\n # sample = re.sub(r'(and|\\.|but|,)', r'\\1\\n', review).split('\\n')\n\n sample = re.sub(r'(\\.|but)', r'\\1\\n', review).split('\\n')\n\n # (? < !\\w\\.\\w.)(? < ![A - Z][a - z]\\.)(? <= \\.| \\?)\\s\n\n return sample\n\ndef noiseClear (sentence):\n\n # tokenize the sentence\n tokens = tokenize(sentence)\n\n # remove punctuation from each word\n table = str.maketrans('', '', string.punctuation)\n stripped = [w.translate(table) for w in tokens]\n\n # remove remaining tokens that are not alphabetic\n words = [word for word in stripped if word.isalpha()]\n\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n words = [w for w in words if not w in stop_words]\n\n # print(words[:100])\n return words\n\n\ndef lemmetize (word_tokens):\n\n list_of_words = []\n lmtzr = WordNetLemmatizer()\n\n for word in word_tokens:\n tokenized = nltk.tag.pos_tag([word])\n type = tokenized[0]\n\n # verbs in to present simple format\n if type[1] in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:\n list_of_words.append(WordNetLemmatizer().lemmatize(word, 'v'))\n else:\n #plurel -> singular\n list_of_words.append(lmtzr.lemmatize(word))\n\n return list_of_words\n\ndef tokenize(sentence):\n\n # split into words\n tokens = word_tokenize(sentence)\n\n # convert to lower case\n tokens = [w.lower() for w in tokens]\n\n return tokens","repo_name":"thisaripatabendi/sensei","sub_path":"aspectsentiment/identification.py","file_name":"identification.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27041573101","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils import timezone\n\n\ndef create_2014_apogaea_event(apps, schema_editor):\n Shift = apps.get_model('shifts', 'Shift')\n Event = apps.get_model('events', 'Event')\n\n open_at = timezone.now().replace(\n year=2014, month=3, day=1, hour=0, minute=0, second=0, microsecond=0,\n )\n close_at = timezone.now().replace(\n year=2014, month=6, day=1, hour=0, minute=0, second=0, microsecond=0,\n )\n apogaea_2013, _ = Event.objects.get_or_create(\n name='Apogaea 2013',\n defaults={\n 'registration_open_at': open_at,\n 'registration_close_at': close_at,\n },\n )\n\n Shift.objects.all().update(event=apogaea_2013)\n\n # Ensure that next migration which removes the nullability of this field\n # will not fail.\n assert not Shift.objects.filter(event__isnull=True).exists()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('events', '0001_initial'),\n ('shifts', '0012_auto_20150312_1137'),\n ]\n\n operations = [\n migrations.RunPython(create_2014_apogaea_event)\n 
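# forward-only data migration; no reverse function is provided\n    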
]\n","repo_name":"Apogaea/voldb","sub_path":"volunteer/apps/events/migrations/0002_auto_20150312_1137.py","file_name":"0002_auto_20150312_1137.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"21887105385","text":"from plot_utils import rc\nimport numpy as np\nimport numpy.random as rng\nimport matplotlib.pyplot as plt\n\n# Set rng seed\nrng.seed(0)\n\n# Set default fonts etc\nrc()\n\n# True signal\ndef signal(t):\n return np.sin(2 * np.pi * t / (10.0 ** -0.5))\n\n# Number of data points\nn = 101\n\n# i-values as defined in the paper (i.e., starting from 1)\ni = np.arange(0, n) + 1\n\n# Two observing strategies\nt_even = (i - 1) / (n - 1)\nt_uneven = ((i - 0.5) / n)**3\n\n# 'Continuous' time\nt = np.linspace(0.0, 1.0, 1001)\n\n# Data\ny_even = signal(t_even) + 0.1*rng.randn(n)\ny_uneven = signal(t_uneven) + 0.1*rng.randn(n)\ny_smooth = signal(t)\n\nplt.plot(t, y_smooth, \"k\", label=\"True signal\", alpha=0.5)\nplt.errorbar(t_even, y_even, color=\"orange\", fmt=\"o\", yerr=0.1,\n label=\"Even data\", alpha=0.3)\nplt.errorbar(t_uneven, y_uneven, color=\"green\", fmt=\"o\", yerr=0.1,\n label=\"Uneven data\", alpha=0.3)\nplt.xlabel(\"$t$\", fontsize=16)\nplt.ylabel(\"$y$\", fontsize=16)\nplt.ylim([-1.3, 2.4])\nplt.legend(loc=\"upper left\")\nplt.savefig(\"sinewave.pdf\", bbox_inches=\"tight\")\nplt.show()\n\n","repo_name":"eggplantbren/InfoNest","sub_path":"paper/figures/sinewave.py","file_name":"sinewave.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"74389917261","text":"from flask import *\nfrom flask_cors import CORS\nimport requests\nfrom MySQL_con import *\nimport datetime\n# .env \nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\ns3_url = os.getenv(\"s3_url\")\ncloudFront_url = os.getenv(\"cloudFront_url\")\n\napp=Flask(\n\t__name__,\n\tstatic_folder=\"static\",\n static_url_path=\"/static\"\n)\n\nCORS(app)\n# Pages\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n@app.route(\"/api/image\", methods=[\"PUT\",\"GET\"])\ndef image():\n if request.method == \"PUT\":\n try:\n\n rawData = request.get_json()\n # print(\"rawData data type\",type(rawData))\n current_time_code = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n image_type = rawData[\"image_type\"]\n image_name = current_time_code+\".\"+image_type\n connent = rawData[\"connent\"]\n image_raw = rawData[\"image_raw\"]\n image_raw = bytes(image_raw)\n headers = {\n \"Content-Type\": f\"image/{image_type}\",\n }\n s3_upload_url = f\"{s3_url}/{image_name}\"\n print(\"C1\",s3_upload_url)\n s3_upload = requests.put(s3_upload_url,headers=headers, data=image_raw, timeout=30)\n request_status = s3_upload.status_code\n if request_status == 200:\n # into MySQL\n sql_command = \"\"\"\n INSERT INTO img_connent (connent, imagename)\n VALUES (%s,%s);\n \"\"\" \n value_input = (connent,image_name)\n insert_or_update_data(sql_command,value_input)\n # get data from MySQL\n sql_command=\"\"\"\n SELECT connent, imagename\n FROM img_connent \n ORDER BY id DESC LIMIT 1;\n \"\"\"\n user_info = query_data_read(sql_command)\n image_name_get = user_info[0][\"imagename\"]\n connent_get = user_info[0][\"connent\"]\n cloudFront_download_url = f\"{cloudFront_url}/{image_name_get}\"\n\n data = {\n \"imageUrl\":cloudFront_download_url,\n \"connent\":connent_get\n } \n return jsonify(data), 200\n except Exception 
as ex:\n return jsonify(error=\"true\", message=f\"{ex}\"), 500\n if request.method == \"GET\":\n try:\n sql_command=\"\"\"\n SELECT connent, imagename\n FROM img_connent \n \"\"\"\n user_info = query_data_read(sql_command)\n print(\"user_info\",user_info)\n # len(user_info)\n dataSum = []\n for user_info_list in user_info:\n connent = user_info_list[\"connent\"]\n imagename = user_info_list[\"imagename\"]\n image_url = f\"{cloudFront_url}/{imagename}\"\n data = {\n \"connent\":connent,\n \"imageUrl\":image_url\n }\n print(data)\n dataSum.append(data)\n print(\"dataSum\",dataSum)\n return dataSum\n except Exception as ex:\n return jsonify(error=\"true\", message=f\"{ex}\"), 500\n\n\napp.debug = True\napp.run(host = \"0.0.0.0\",port=80)","repo_name":"monsterbat/BackEnd-Practice","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18435676677","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom screen import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'leagues', views.LeagueViewSet)\nrouter.register(r'teams', views.TeamViewSet)\nrouter.register(r'couches',views.CoachViewSet)\nrouter.register(r'players',views.PlayerViewSet)\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'mexicoder.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n)\n","repo_name":"eren152/mexicoder_screen","sub_path":"mexicoder/mexicoder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4538746772","text":"#!/usr/bin/env python\nimport sys\nimport flask\nfrom pprint import pprint\nimport pickle\nimport json\n\nfrom pymongo import MongoClient\n\n#---------- OPEN DATABASE CONNECTION----------------#\nclient = MongoClient()\ndb = client.cocktailapp\ncollection = db.cocktaildb\n\ndef mongo_query(ingredients_list=[]):\n return collection.aggregate([{\n \"$project\": {\n \"name\": 1,\n \"site_id\": 1,\n \"glass_type\": 1,\n \"instructions\": 1,\n \"ingredients.ingredient\": 1,\n \"recognitions\": 1,\n \"AisSubset\": {\n \"$setIsSubset\": [\"$ingredients.ingredient\", ingredients_list]\n },\n \"num_ingredients\": {\"$size\": \"$ingredients\"}\n }\n },\n {\n \"$match\": {\n \"AisSubset\": True,\n \"num_ingredients\": {\"$gt\": 0}\n }\n \n },\n {\n \"$project\": {\n \"name\": 1,\n \"site_id\": 1,\n \"ingredients.ingredient\":1,\n \"glass_type\": 1,\n \"instructions\": 1,\n \"recognitions\": 1,\n \"_id\": 0,\n }\n }\n ])\n\ndef drinks_short_n(ingredients_list=[], n=1):\n return collection.aggregate(\n [\n { \"$project\": { \"ingredients.ingredient\": 1, \n \"name\": 1,\n \"site_id\": 1,\n \"instructions\": 1,\n \"recognitions\": 1,\n \"glass_type\": 1,\n \"inBOnly\": \n { \"$setDifference\": [\"$ingredients.ingredient\", \n ingredients_list] \n }, \n } \n },\n {\n \"$match\": { \n \"inBOnly\": {\"$size\": 1}\n }\n \n },\n {\n \"$project\": {\n \"name\": 1,\n \"site_id\": 1,\n \"ingredients.ingredient\":1,\n \"glass_type\": 1,\n \"instructions\": 1,\n \"recognitions\": 1,\n \"_id\": 0,\n }\n },\n { \"$limit\" : 5 }\n ]\n )\n\n#pprint(drink_dict)\n\n#---------- URLS AND WEB PAGES 
-------------#\n\n# Initialize the app\napp = flask.Flask(__name__, static_url_path = \"/static\")\n\n# Homepage\n@app.route(\"/\")\ndef viz_page():\n \"\"\"\n Homepage: serve our visualization page, awesome.html\n \"\"\"\n #with open(\"index.html\", 'r') as viz_file:\n # return viz_file.read()\n db_spirits = sorted([\"gin\",\n \"rum\",\n \"tequila\",\n \"vodka\",\n \"Scotch\",\n \"rye\"], key=str.lower)\n db_liqueurs = sorted([\"Cointreau\",\n \"sweet vermouth\",\n \"dry vermouth\",\n \"Campari\",\n \"Midori\"], key=str.lower)\n db_mixers = [\"tonic water\",\n \"soda\",\n \"ginger beer\"]\n db_juices = [\"orange juice\",\n \"grapefruit juice\",\n \"fresh lime juice\"]\n db_bitters = [\"Angustora bitters\",\n \"orange bitters\",\n \"Peychaud's bitters\"]\n db_garnishes = [\"lemon\",\n \"lime\",\n \"orange\"]\n db_ingredients = [{\"category\": \"Spirits\",\n \"ingredients\": db_spirits}, \n {\"category\": \"Liqueurs\",\n \"ingredients\": db_liqueurs},\n {\"category\": \"Mixers\",\n \"ingredients\": db_mixers},\n {\"category\": \"Juices\",\n \"ingredients\": db_juices}, \n {\"category\": \"Bitters\",\n \"ingredients\": db_bitters}, \n {\"category\": \"Garnishes\",\n \"ingredients\": db_garnishes}]\n return flask.render_template(\"index.html\", db_ingredients = db_ingredients)\n\n# Get an example and return it's score from the predictor model\n@app.route(\"/subset\", methods=[\"POST\"])\ndef subset():\n \"\"\"\n When A POST request with json data is made to this uri,\n Get the cocktails that can be made with the subset of ingredients\n \"\"\"\n # Get decision score for our example that came with the request\n \n data = flask.request.json\n ingredients_list = data[\"ingredients\"]\n\n drink_dict = mongo_query(ingredients_list)\n drink_dict_ids = [x[\"site_id\"] for x in drink_dict[\"result\"]]\n orig_length = len(drink_dict[\"result\"])\n\n #extended_drink_list = ingredients_list + [\"fresh lime juice\"]\n extended_drink_dict = drinks_short_n(ingredients_list)\n\n # extended_drink_dict[\"result\"] = [x for x in extended_drink_dict[\"result\"] \n # if x[\"site_id\"] not in drink_dict_ids]\n #import pdb; pdb.set_trace()\n new_length = len(extended_drink_dict[\"result\"])\n\n pprint(drink_dict)\n print >> sys.stderr, \"old %i versus new %i\" % (orig_length, new_length)\n \n #return flask.jsonify(extended_drink_dict)\n\n \n results = {\"drinks\": drink_dict, \n \"extended_drinks\": extended_drink_dict}\n return flask.jsonify(results)\n\n#--------- RUN WEB APP SERVER ------------#\n\napp.run(host='0.0.0.0', port=80, debug=True)\n","repo_name":"bo-peng/cocktailapp","sub_path":"drink.py","file_name":"drink.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9606956795","text":"import os\n\nimport pyarchive\n\nimport zope.interface\nimport zope.component\n\nimport p6\nimport p6.ui.events\nimport p6.storage.common\nimport p6.extension.exceptions\n\nfrom p6 import api\nfrom p6.metadata.interfaces import IMetadataStorage\nfrom ccpublisher.interfaces import IEmbeddable\n\nimport ui\n\nclass CallbackBridge(object):\n \"\"\"Bridge pyarchive status update callbacks to P6 events.\"\"\"\n \n def __init__(self):\n pass\n \n def reset(self, steps=1, filename=None, status=''):\n if filename is not None:\n status = 'Uploading %s...' 
% filename\n steps = os.stat(filename).st_size\n \n resetEvt = p6.ui.events.ResetStatusEvent(steps=steps, message=status)\n zope.component.handle(resetEvt)\n \n def increment(self, status=\"\", steps=1):\n update = p6.ui.events.UpdateStatusEvent(delta=steps,\n message=status)\n zope.component.handle(update)\n \n def finish(self):\n pass\n \n def __call__(self, bytes=1):\n self.increment(steps=bytes)\n \n\ndef selfhostMetadataUi(storage):\n\n class SelfHostMetadataUi(object):\n\n zope.interface.implements(p6.ui.interfaces.IPageList)\n\n def __init__(self, target, event):\n self.__pages = None\n self.__storage = storage\n\n def createPages(self):\n \n # XXX -- hack\n # \n # We import here because doing so at instantiation causes problems\n # -- in particular, the App needs to be created before other\n # UI objects, and the import has side effects (querying the\n # background color)\n \n import p6.ui.pages.fieldrender\n \n # create the simple page\n fields = [\n p6.metadata.base.metadatafield(p6.metadata.types.ITextField)(\n 'vurl', 'Verification URL'),\n ]\n\n self.__pages = []\n\n desc = \"Please enter the URL where you will host your \" \\\n \"verification metadata. In most cases, this will \" \\\n \"be the page you link to your MP3 file from.\"\n \n self.__pages.append(\n lambda x: p6.ui.pages.fieldrender.SimpleFieldPage(\n x, 'SELFHOST_UI_META', 'Self Hosted Files', fields,\n self.callback, description=desc))\n\n def list(self):\n # see if we've been activated\n if (self.__storage.activated()):\n \n if self.__pages is None:\n self.createPages()\n\n return self.__pages\n else:\n # not activated, so don't ask for information\n return []\n\n def callback(self, value_dict):\n\n # make sure the verification URL is specified\n if not( ('vurl' in value_dict) and (value_dict['vurl']) ):\n raise p6.extension.exceptions.ExtensionSettingsException(\n \"You must supply the verification URL.\")\n\n # store the credentials for future use\n self.storage.verification_url = value_dict['vurl']\n\n self.storage.registerEvents()\n\n return SelfHostMetadataUi\n\ndef selfhostStorageFinalPage(storage):\n\n class SelfHostFinalPage(object):\n\n zope.interface.implements(p6.ui.interfaces.IPageList)\n\n def __init__(self, target, event):\n self.__pages = [ui.FinalPage]\n self.__storage = storage\n\n def __expand(self):\n \"\"\"Perform last minute string interpolation.\"\"\"\n\n if getattr(ui.FinalPage, 'needsExpansion', 'True'):\n # only do this once...\n ui.FinalPage.PAGE_XRC = ui.FinalPage.PAGE_XRC % \\\n self.__storage.uri\n ui.FinalPage.needsExpansion = False\n \n def list(self):\n # see if we've been activated\n if (self.__storage.activated()):\n\n self.__expand()\n return self.__pages\n else:\n # not activated, so don't make a contribution to the UI\n return []\n\n return SelfHostFinalPage\n\nclass SelfHostStorage(p6.metadata.base.BasicMetadataStorage,\n p6.storage.common.CommonStorageMixin):\n \n zope.interface.implements(p6.metadata.interfaces.IMetadataStorage,\n p6.storage.interfaces.IStorage)\n\n id = 'SELFHOST_STORAGE'\n name = 'Self-hosted Files'\n description = 'Create metadata suitable for use with files hosted ' \\\n 'on your personal web site.'\n \n # metadata interface\n def __init__(self):\n p6.metadata.base.BasicMetadataStorage.__init__(self)\n\n # register handlers for extension points --\n # this allows us to extend the user interface in a unified way\n # \n zope.component.provideSubscriptionAdapter(\n selfhostMetadataUi(self),\n (p6.extension.interfaces.IStorageMetaCollection,\n 
p6.extension.events.IExtensionPageEvent,\n             ),\n            p6.ui.interfaces.IPageList)\n\n        zope.component.provideSubscriptionAdapter(\n            selfhostStorageFinalPage(self),\n            (p6.extension.interfaces.IPostStoreExtension,\n             p6.extension.events.IExtensionPageEvent,\n             ),\n            p6.ui.interfaces.IPageList)\n\n    def validate(self, event=None):\n        # determine the appropriate collection\n        work_type = api.findField('format')\n\n        if work_type:\n            work_type = work_type.lower()\n        else:\n            # no work type; can not validate\n            raise KeyError(\"work_type not specified.\")\n\n    def store(self, event=None):\n        # generate the RDF\n        pass\n    \n","repo_name":"BackupTheBerlios/cctools-svn","sub_path":"publisher/tags/ccpublisher-1.9.3/ccpublisher/selfhost.py","file_name":"selfhost.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14037039937","text":"import string\n\nclass Solution:\n    def func(self,a):\n        l=string.ascii_lowercase\n        return str(l.index(a)+1) if l.index(a)+1<10 else str(l.index(a)+1)+\"#\"\n    def freqAlphabets(self, s: str) -> str:\n        l=string.ascii_lowercase\n        m=map(self.func,l)\n        n=list(m)\n        r=len(s)-1\n        a=[]\n        while r>=0:\n            if s[r]==\"#\":\n                a.append(s[r-2:r+1])\n                r-=3\n            else:\n                a.append(s[r])\n                r-=1\n        a=a[::-1]\n        z=\"\"\n        for i in a:\n            z+=l[n.index(i)]\n        return z","repo_name":"Mihretthe/Competitive-Programming","sub_path":"1309-decrypt-string-from-alphabet-to-integer-mapping/1309-decrypt-string-from-alphabet-to-integer-mapping.py","file_name":"1309-decrypt-string-from-alphabet-to-integer-mapping.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"37085788455","text":"# Roots of a quadratic equation\r\nimport math\r\na=int(input(\"Introduza o valor de a:\"))\r\nb=int(input(\"Introduza o valor de b:\"))\r\nc=int(input(\"Introduza o valor de c:\"))\r\ndelta=b**2-4*a*c\r\n\r\nif delta<0:\r\n    print(\"Esta equação não possui raizes reais.\")\r\nelse:\r\n    x1 = float((-b - math.sqrt(delta)) / (2 * a))\r\n    x2 = float((-b + math.sqrt(delta)) / (2 * a))\r\n    if delta==0:\r\n        print(\"A raiz dupla desta equação é: \", x1)\r\n    else:\r\n        if x1.+)')\n\n    def __init__(self, subdomain, *args, **kwargs):\n        super(LuccaBrowser, self).__init__(*args, **kwargs)\n        self.BASEURL = 'https://%s.ilucca.net' % subdomain\n        self.id_card_doc = None\n\n    def do_login(self):\n        self.login.go()\n        self.page.do_login(self.username, self.password)\n\n        if not self.home.is_here():\n            self.page.check_error()\n            raise Exception('error is not handled')\n\n    @need_login\n    def all_events(self, start, end):\n        self.users.go()\n        users = {u.id: u for u in self.page.iter_users()}\n\n        last = None\n        while True:\n            if end:\n                if end < start:\n                    break\n            else:\n                if last and last + timedelta(days=300) < start:\n                    self.logger.info('300 days without event, stopping')\n                    break\n\n            window_end = start + timedelta(days=14)\n\n            params = {\n                'date': 'between,%s,%s' % (start.strftime('%Y-%m-%d'), window_end.strftime('%Y-%m-%d')),\n                'leavePeriod.ownerId': ','.join(str(u.id) for u in users.values()),\n                'fields': 'leavePeriod[id,ownerId,isConfirmed],isAm,date,color,isRemoteWork,leaveAccount[name,isRemoteWork]',\n            }\n            self.calendar.go(params=params)\n            events = self.page.iter_events(start, users=users)\n            for event in sorted(events, key=lambda ev: new_datetime(ev.start_date)):\n                if end and event.start_date >= end:\n                    continue\n                yield event\n                last = new_datetime(event.start_date)\n\n            start = window_end + timedelta(days=1)\n\n    @need_login\n    def 
iter_subscriptions(self):\n        params = {'fields': 'id,employeeNumber,extendedData'}\n        self.subscription.go(params=params)\n        yield self.page.get_subscription()\n\n        self.id_card_doc = self.page.get_id_card_document()\n\n    @need_login\n    def iter_documents(self, subscription):\n        yield self.id_card_doc\n\n        params = {\n            'fields': 'id,import[name,startDate,endDate]',\n            'ownerId': subscription._owner_id,\n            'orderBy': 'import.endDate,desc,import.startDate,desc,import.creationDate,desc',\n        }\n        self.payslips.go(params=params)\n        for doc in self.page.iter_documents():\n            yield doc\n","repo_name":"rbignon/woob","sub_path":"modules/lucca/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"47"} +{"seq_id":"8491325026","text":"file = open(\"1/1.txt\", \"r\")\nlines = file.readlines()\n\nmaxCal = [0, 0, 0]\ntemp = 0\nfor l in lines:\n    l = l.strip()\n    if len(l) > 0:\n        temp = temp + int(l)\n    else:\n        for i in range(0, len(maxCal)):\n            if maxCal[i] < temp:\n                maxCal.insert(i, temp)\n                maxCal.pop()\n                break\n        temp = 0\n\n# handle the last group too, since the input may not end with a blank line\nfor i in range(0, len(maxCal)):\n    if maxCal[i] < temp:\n        maxCal.insert(i, temp)\n        maxCal.pop()\n        break\n\nval = 0\nfor mc in maxCal:\n    val = val + mc\nprint(val)\n# Don't like this solution but it will do for now","repo_name":"OskarHokkanen/AdventOfCode2022","sub_path":"1/Calorie_Counting_2.py","file_name":"Calorie_Counting_2.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71714416462","text":"#!/usr/bin/env python3\nimport re\nmain = []\nsubr = []\nx = input()\nwhile x != '':\n    x = x.split()\n    for i in range(len(x)):\n        if x[i][:4] == 'proc':\n            line = re.split(r'[,)]+', x[i][10:])\n            if line[1] == 'fw':\n                line[1] = 'forward'\n            if line[1] == 'turn':\n                line[1] = 'beep'\n            if line[1] == 'subr':\n                line[1] = 'subroutine'\n            if x[i][:9] == 'proc(main':\n                main.append((int(line[0]), line[1]))\n            else:\n                subr.append((int(line[0]), line[1]))\n    x = input()\n# print(len(main))\nmain = sorted(main, key=lambda x: x[0])\nsubr = sorted(subr, key=lambda x: x[0])\n# main.sort(lambda x: x[0])\n# subr.sort(lambda x: x[0])\nfor i in main:\n    print(i[1], end=' ')\nprint('.')\nfor i in subr:\n    print(i[1], end=' ')\nprint('.')\n","repo_name":"HtBest/lpcp2022_light_bot","sub_path":"post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2082502395","text":"# practice\n\n'''\nUsing defaultdict avoids the KeyError and also makes the code shorter.\ndefaultdict(<class 'int'>, {'bobby1': 2, 'bobby2': 3, 'bobby3': 1})\n'''\n\nfrom collections import defaultdict\n\nusers = ['bobby','bobby1','bobby2','bobby1','bobby','bobby']\n\nstatistics = defaultdict(int)\nfor user in users:\n    statistics[user] += 1\n\nprint(statistics)\n\n'''\nBelow we count how many times each element appears without using defaultdict.\nAccessing statistics[user] directly could raise a KeyError,\nso we use the dict's get method, which returns the default value when the key is missing.\nOn the first visit the key is not in the dict yet, so statistics[user] = 1;\nfrom the second visit on we simply keep adding 1.\n\n'''\nusers = ['bobby','bobby1','bobby2','bobby1','bobby','bobby']\n\nstatistics = {}\nfor user in users:\n    statistics[user] = statistics.get(user,0) + 1\nprint(statistics)\n","repo_name":"KamiC6238/practice","sub_path":"Number of Statistics.py","file_name":"Number of Statistics.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38387826387","text":"''' Plotting 2 graphs on the same plot using matplotlib '''\r\n\r\nfrom matplotlib 
import pyplot as plt\r\n\r\n\r\ny2_values=[29,27,29,30,31,31,34,33,34,34,33,32,30,30,21,27,29]\r\ny1_values=[28,29,29,30,31,32,33,33,34,34,33,32,31,30,29,29,29]\r\nx_values=[]\r\nthing=6\r\n\r\nwhile len(x_values)!=len(y1_values):\r\n if thing!=24:\r\n x_values.append(str(thing)+\":30\")\r\n thing=thing+1\r\n else:\r\n x_values.append(\"00\"+\":30\")\r\n thing=1\r\n\r\nplt.plot(x_values,y1_values,x_values,y2_values,marker=\"o\")\r\nplt.legend([\"Chennai Temp\",\"Puducherry Temp\"])\r\nplt.title(\"Temperature Variation of Chennai\")\r\nplt.xlabel(\"24 hour Time\")\r\nplt.ylabel(\"Temperature in degrees Celcius\")\r\nplt.axis(ymin=0)\r\nplt.axis(ymax=40)\r\nplt.axis(xmax=len(x_values))\r\nplt.tick_params(axis='x', which='major', labelsize=5.5)\r\n\r\nplt.show()\r\n","repo_name":"Saivenkat1903/My_Python_Problems_and_Solutions","sub_path":"Temperature_Comparison.py","file_name":"Temperature_Comparison.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7270557302","text":"n = int(input())\r\nlst = [int(input()) for _ in range(n)]\r\nstack = []\r\nnum = 1\r\nans = []\r\nfor x in lst:\r\n while num<=x:\r\n stack.append(num)\r\n ans.append('+')\r\n num+=1\r\n if x==stack[-1]:\r\n stack.pop()\r\n ans.append('-')\r\n elif x int:\n graph = collections.defaultdict(list)\n for u, v, w in flights:\n graph[u].append((v, w))\n\n heap = [(0, 0, src)]\n while heap:\n cost, stops, cur_stop = heapq.heappop(heap)\n if cur_stop == dst: # 需要在开头判断才是正确的\n return cost\n if stops <= K:\n for v, w in graph[cur_stop]:\n heapq.heappush(heap, (cost + w, stops + 1, v))\n\n return -1","repo_name":"zihuaweng/leetcode-solutions","sub_path":"leetcode_python/787.Cheapest_Flights_Within_K_Stops.py","file_name":"787.Cheapest_Flights_Within_K_Stops.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"3481590079","text":"from HTMLParser import HTMLParser\nfrom forum import Thread, Post\n\nclass ParserBase(HTMLParser):\n \"\"\"base class for ThreadParser and ForumParser\n to get data out, attach listeners that are a tuple like\n (tagname, attribute name, attribute value, function(parser, attributes)\n and it'll fire the function when it hits a matching tag. 
set the parser's mode\n    in the function to grab data, it'll automatically have the mode cleared when the\n    parser leaves the tag's scope\"\"\"\n\n    def __init__(self):\n        HTMLParser.__init__(self)\n        self.listeners = []\n        self.depth = 0\n        self.mode = None  # no mode until set_mode is called\n\n    def set_mode(self, mode):\n        self.mode = mode\n        self.depth = 0\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'img':\n            # this was erroneously changing the tag depth count\n            return\n\n        self.depth = self.depth + 1\n        for listener in self.listeners:\n            if tag == listener[0]:\n                for attr in attrs:\n                    if attr[0] == listener[1] and attr[1] == listener[2]:\n                        listener[3](self, attrs)\n\n    def handle_endtag(self, tag):\n        if self.mode == None:\n            return\n        self.depth = self.depth - 1\n        if self.depth < 0:\n            self.set_mode(None)\n\nclass ThreadParser(ParserBase):\n    \"\"\"call read_thread with a html string to get back a list of posts\"\"\"\n\n    def __init__(self):\n        ParserBase.__init__(self)\n        self.listeners.append(('table', 'class', 'post', ThreadParser.handle_postid))\n        self.listeners.append(('dt', 'class', 'author', lambda parser, attrs: parser.set_mode('author')))\n        self.listeners.append(('td', 'class', 'postbody', lambda parser, attrs: parser.set_mode('postbody')))\n\n        #TODO: add a link to the attachment to the end of the message\n        self.listeners.append(('p', 'class', 'attachment', lambda parser, attrs: parser.set_mode(None)))\n        self.listeners.append(('div', 'class', 'bbc-block', lambda parser, attrs: parser.post.open_quote()))\n        self.post = None\n\n    def handle_endtag(self, tag):\n        if tag == 'blockquote' and self.post != None:\n            self.post.message += ' }'\n        ParserBase.handle_endtag(self, tag)\n\n    def handle_postid(self, attrs):\n        for attr in attrs:\n            if attr[0] == 'id':\n                postid = int(attr[1][4:])\n                self.posts.append(Post(postid))\n                self.post = self.posts[-1]\n                return\n\n    def read_thread(self, str):\n        self.reset()\n        self.feed(str)\n        self.close()\n        return self.posts\n\n    def reset(self):\n        self.posts = []\n        self.post = None\n        self.mode = None\n        ParserBase.reset(self)\n\n    def handle_data(self, data):\n        if self.mode == 'author':\n            self.post.author = data\n            self.set_mode(None)\n        elif self.mode == 'postbody':\n            data = data.strip()\n            if data != '':\n                if self.post.message != '' and self.post.message[-1] != ' ':\n                    self.post.message += ' '\n                self.post.message += data\n    \nclass ForumParser(ParserBase):\n    \"\"\"call read_forum with an html string to get back a list of threads\"\"\"\n\n    def __init__(self):\n        ParserBase.__init__(self)\n        self.listeners.append(('a', 'class', 'thread_title', lambda parser, attrs: parser.set_mode('thread_title')))\n        self.listeners.append(('td', 'class', 'author', lambda parser, attrs: parser.set_mode('author')))\n        self.listeners.append(('a', 'class', 'count', lambda parser, attrs: parser.set_mode('unread')))\n        self.listeners.append(('a', 'class', 'x', lambda parser, attrs: parser.thread.unread_zero()))\n        self.listeners.append(('tr', 'class', 'thread', ForumParser.handle_threadid))\n        self.listeners.append(('tr', 'class', 'thread seen', ForumParser.handle_threadid))\n        self.threads = []\n\n    def handle_threadid(self, attrs):\n        for attr in attrs:\n            if attr[0] == 'id':\n                threadid = int(attr[1][6:])\n                self.threads.append(Thread(threadid))\n                self.thread = self.threads[-1]\n\n    def read_forum(self, str):\n        self.reset()\n        self.feed(str)\n        self.close()\n        self.threads.reverse()\n        return self.threads\n\n    def reset(self):\n        ParserBase.reset(self)\n        self.set_mode(None)\n        self.threads = []\n    \n    def handle_data(self, data):\n        if self.mode == 'thread_title':\n
self.thread.title = data\n self.set_mode(None)\n elif self.mode == 'author':\n self.thread.author = data\n self.set_mode(None)\n elif self.mode == 'unread':\n self.thread.unread = int(data)\n self.set_mode(None)\n","repo_name":"huge-sesh/posteur","sub_path":"parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"39836017472","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs import msg\nfrom ros_assignment1.msg import Chat\nfrom datetime import datetime\n\nlog = []\ndef push_data(lst,data):\n if data in lst:\n return False \n else:\n if len(lst) == 10:\n lst.pop(0)\n lst.append(data)\n return True\n\ndef print_log(lst):\n for index,data in enumerate(lst):\n if index == 0:\n print('\\nCHAT\\n-----------------------------------------------')\n date_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n source = data.source_id.data\n content = data.message.data\n print( date_str + ' ' + source + ': ' + content)\n if index == len(lst) - 1:\n print('-----------------------------------------------')\n\n\ndef callback(data):\n if push_data(log,data):\n print_log(log)\n print(\"Type new message below: \") \n\ndef chat():\n pub = rospy.Publisher('chatter',Chat,queue_size=10)\n rospy.Subscriber('chatter',Chat,callback)\n name = raw_input('What is your username? ')\n rospy.init_node(name)\n rate = rospy.Rate(10)\n \n while not rospy.is_shutdown():\n hello_str = raw_input('Type new message below: \\n')\n header = msg.Header()\n header.stamp = rospy.Time.now() \n source_id = msg.String(rospy.get_name())\n message = msg.String(hello_str)\n c = Chat(header,source_id,message)\n\n push_data(log,c)\n pub.publish(c)\n print_log(log)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n chat()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"raysonkoh/GroupChat-using-ROS","sub_path":"src/ros_assignment1/scripts/mychat.py","file_name":"mychat.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29439027529","text":"from odoo import models, fields\r\nfrom odoo import http\r\nimport requests\r\n\r\n\r\nclass Reserv(models.Model):\r\n _name = 'my_custom_module.reserv'\r\n _description = 'Reservation Entity'\r\n\r\n no_reservasi = fields.Char(string='No Reservasi', required=True, size=12)\r\n no_pendaftaran = fields.Char(string='No Pendaftaran', required=True, size=12)\r\n kd_poli = fields.Char(string='Kd Poli', size=2)\r\n norm = fields.Char(string='Norm', size=8)\r\n tgl_reservasi = fields.Datetime(string='Tgl Reservasi')\r\n tgl_daftar = fields.Datetime(string='Tgl Daftar')\r\n no_urut = fields.Char(string='No Urut', size=7)\r\n nama = fields.Char(string='Nama', size=35)\r\n kd_dokter = fields.Char(string='Kd Dokter', size=7)\r\n kd_caramasuk = fields.Char(string='Kd Caramasuk', size=2)\r\n nm_telp = fields.Char(string='Nm Telp', size=35)\r\n no_telp1 = fields.Char(string='No Telp1', size=20)\r\n sts_batal = fields.Boolean(string='Sts Batal')\r\n sts_pagi = fields.Boolean(string='Sts Pagi')\r\n kd_jns_carabayar = fields.Char(string='Kd Jns Carabayar', size=2)\r\n kd_bayar = fields.Char(string='Kd Bayar', size=3)\r\n modified_by = fields.Char(string='Modified By')\r\n modified_at = fields.Datetime(string='Modified At', readonly=True, auto_now=True)\r\n\r\n class ReservationController(http.Controller):\r\n @http.route('/odoo_rest_module/reservation', type='json', 
auth='public', methods=['POST'])\r\n        def create_reservation(self, **post):\r\n            no_reservasi = post.get('no_reservasi')\r\n            no_pendaftaran = post.get('no_pendaftaran')\r\n            kd_poli = post.get('kd_poli')\r\n            norm = post.get('norm')\r\n            tgl_reservasi = post.get('tgl_reservasi')\r\n            tgl_daftar = post.get('tgl_daftar')\r\n            no_urut = post.get('no_urut')\r\n            nama = post.get('nama')\r\n            kd_dokter = post.get('kd_dokter')\r\n            kd_caramasuk = post.get('kd_caramasuk')\r\n            nm_telp = post.get('nm_telp')\r\n            no_telp1 = post.get('no_telp1')\r\n            sts_batal = post.get('sts_batal')\r\n            sts_pagi = post.get('sts_pagi')\r\n            kd_jns_carabayar = post.get('kd_jns_carabayar')\r\n            kd_bayar = post.get('kd_bayar')\r\n            modified_by = post.get('modified_by')\r\n            modified_at = post.get('modified_at')\r\n\r\n            payload = {\r\n                'NO_RESERVASI': no_reservasi,\r\n                'NO_PENDAFTARAN': no_pendaftaran,\r\n                'KD_POLI': kd_poli,\r\n                'NORM': norm,\r\n                'TGL_RESERVASI': tgl_reservasi,\r\n                'TGL_DAFTAR': tgl_daftar,\r\n                'NO_URUT': no_urut,\r\n                'NAMA': nama,\r\n                'KD_DOKTER': kd_dokter,\r\n                'KD_CARAMASUK': kd_caramasuk,\r\n                'NM_TELP': nm_telp,\r\n                'NO_TELP': no_telp1,\r\n                'STS_BATAL': sts_batal,\r\n                'STS_PAGI': sts_pagi,\r\n                'KD_JNS_CARABAYAR': kd_jns_carabayar,\r\n                'KD_BAYAR': kd_bayar,\r\n                'MODIFIEDBY': modified_by,\r\n                'MODIFIEDAT': modified_at,\r\n            }\r\n\r\n            api_endpoint = 'http://localhost:8080/api/reservation'\r\n            response = requests.post(api_endpoint, json=payload)\r\n            if response.status_code == 201:\r\n                result = response.json()\r\n                return {'success': True, 'message': 'Reservation created successfully.'}\r\n            else:\r\n                error_message = response.json().get('message', 'Unknown error occurred.')\r\n                return {'success': False, 'message': error_message}\r\n","repo_name":"titishaq/AssignmentAPI","sub_path":"reservation_api_test_updated/models/reserv_models.py","file_name":"reserv_models.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31419790203","text":"import timeit\n\n\ndef solutionA(N):\n    primes = set(range(3, N, 2))\n    primes.add(2)\n    i = 3\n    while i * i <= N:  # sieving past sqrt(N) is wasted work\n        for j in range(i*i, N, i):\n            if j in primes:\n                primes.remove(j)\n        i += 2\n    return primes\n\n\ndef solutionB(N):\n    sieve = [False, True] * (N//2 + 1)\n    sieve[1] = False\n    sieve[2] = True\n    i = 3\n    while (i*i <= N):\n        if sieve[i] == True:\n            for j in range(i*i, N+1, i):\n                sieve[j] = False\n        i += 2\n    primes = []\n    for i in range(N):\n        if sieve[i]:\n            primes.append(i)\n    return primes\n\n\nprint(solutionA(100))\nprint(solutionB(100))\n\nstarttime = timeit.default_timer()\nsolutionA(10000000)\nprint(timeit.default_timer() - starttime)\n\nstarttime = timeit.default_timer()\nsolutionB(10000000)\nprint(timeit.default_timer() - starttime)\n","repo_name":"heldersepu/hs-scripts","sub_path":"Python/codility/Eratosthenes.py","file_name":"Eratosthenes.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"47"} +{"seq_id":"40681963378","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @author: T\n\nimport csv\nimport os\nimport sys\nfrom google_images_download import google_images_download\n\n# instantiate a downloader\ndownloader = google_images_download.googleimagesdownload()\n\ncsv_file = csv.reader(open('keywords.csv', encoding='utf-8'))\n\ndef download_images(csv_file):\n\t'''\n\tcsv_file: keyword file\n\tlimit: maximum number of images\n\tprint_urls: print the download URLs\n\tchromedriver: chromedriver install path\n\toutput_directory: save location\n\t'''\n\tfor word in 
csv_file:\n\t\targuments = {\n\t\t\t'keywords': str(word), \n\t\t\t'limit': 2, \n\t\t\t'print_urls': True, \n\t\t\t'chromedriver': r'D:\\chromedriver_win32\\chromedriver.exe',\n\t\t\t'output_directory': ''}\n\t\tdownloader.download(arguments)\n\nif __name__ == '__main__':\n\tdownload_images(csv_file)\n","repo_name":"t-dawei/google_images","sub_path":"google_download.py","file_name":"google_download.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21494499958","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ncomparison_qrs_detectors.version\r\n-------\r\n\r\nA Version tracker.\r\n\r\n:copyright: (c) 2021 by Open Innovation Lab\r\n:license: BSD 3-clause, see LICENSE for more details.\r\n\"\"\"\r\n\r\nVERSION = (0, 0, 1)\r\n__version__ = \".\".join(map(str, VERSION))\r\n","repo_name":"Andrew1021/Comparison-QRS-Detectors","sub_path":"comparison_qrs_detectors/comparison_qrs_detectors/__version__.py","file_name":"__version__.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71157187983","text":"\"\"\"\nThis file is part of KIGM-Discord-Bot.\n\nKIGM-Discord-Bot is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nKIGM-Discord-Bot is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with KIGM-Discord-Bot. If not, see .\n\"\"\"\n\nimport os\n\nimport dbl\nfrom discord.ext import commands\n\n\n# For the thing uh the @ decorator thing u\n# put on top of the function, yea yea\ndef support_server_only():\n async def predicate(ctx):\n if ctx.guild.id == 770558935144726528:\n return True\n\n await ctx.send(\n \"This command is exclusively **for the support server only.**\\nSo here's link of the support server then! **https://discord.gg/jz4WxkB **\"\n )\n return False\n\n return commands.check(predicate)\n\n\ndef cmd_has_blacklist():\n async def get_bl(ctx):\n cmdbl_data = await ctx.bot.bl.find(ctx.command.name)\n if \"Blacklisted\" in cmdbl_data:\n if ctx.author.id not in cmdbl_data[\"Blacklisted\"]:\n return True\n\n await ctx.error(\n \"You are currently *blacklisted* from using this command.\"\n )\n return False\n\n return commands.check(get_bl)\n\n\ndef voters_only():\n async def check_voted(ctx):\n j = dbl.DBLClient(ctx.bot, os.environ.get(\"DBL_SECRET\"))\n usr_vote = await j.get_user_vote(ctx.author.id)\n\n await j.close() # idk I get annoyed sometimes with the warnings on the console\n\n if usr_vote:\n return True\n\n await ctx.send(\n \"oops! 
It seems like this command is for **__voters only.__**\\nIf you want to use this command just **vote me on top.gg!**\\nVote link: **https://top.gg/bot/763626077292724264/vote **\"\n )\n return False\n\n return commands.check(check_voted)\n","repo_name":"Makiyu-py/KIGM-Discord-Bot","sub_path":"core/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"18544842407","text":"import os\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nLAZONA_CHECKEDIN_URL = 'https://lazona.co/es/directory/members?onlycheckedin=true'\nTNP_MEMBERS_SLACK_IDS = {\n 'Daniel Luque Quintana': '',\n 'Miguel Ángel Calero Fernández': '',\n 'Javier Aguirre': '',\n 'Nieves María Borrero Barea': '',\n 'Natalia Moreno Arévalo': ''\n}\nSLACK_API_KEY = os.environ.get('SLACK_API_KEY')\nSLACK_USER_PROFILE_SET_ENDPOINT = 'https://theneonproject.slack.com/api/users.profile.set'\nSLACK_LAZONA_PAYLOAD = {\n\t\"user\": '',\n\t\"profile\": {\n \"status_text\": \"En la Zona\",\n \"status_emoji\": \":zona:\",\n \"status_expiration\": 0\n\t}\n}\nSLACK_NOTLAZONA_PAYLOAD = {\n\t\"user\": '',\n\t\"profile\": {\n \"status_text\": \"\",\n \"status_emoji\": \"\",\n \"status_expiration\": 0\n\t}\n}\nSTATUSES = {\n 'zona': SLACK_LAZONA_PAYLOAD,\n 'notzona': SLACK_NOTLAZONA_PAYLOAD\n}\nHEADERS = {\n 'Content-Type': 'application/json; charset=utf-8',\n 'Authorization': ' '.join(['Bearer', SLACK_API_KEY])\n}\n\n\ndef set_status_slack(user_id, status):\n payload = STATUSES[status].copy()\n payload['user'] = user_id\n requests.post(SLACK_USER_PROFILE_SET_ENDPOINT, json=payload, headers=HEADERS)\n\ndef get_people_lazona():\n req = requests.get(LAZONA_CHECKEDIN_URL)\n status_code = req.status_code\n\n if status_code == 200:\n html = BeautifulSoup(req.text, \"html.parser\")\n members = html.find_all('h3', {'class': 'user-badge__name'})\n\n return [member.getText().strip() for member in members]\n\n\ndef is_in_lazona(member_name, members):\n return member_name in TNP_MEMBERS_SLACK_IDS.keys()\n\n\ndef main():\n members = get_people_lazona()\n tnp_members_zona = []\n\n for member_name in members:\n if is_in_lazona(member_name, members):\n set_status_slack(TNP_MEMBERS_SLACK_IDS[member_name], 'zona')\n tnp_members_zona.append(member_name)\n\n members_out = [\n member_name\n for member_name in TNP_MEMBERS_SLACK_IDS.keys()\n if member_name not in tnp_members_zona\n ]\n\n if members_out:\n for member_name in members_out:\n set_status_slack(TNP_MEMBERS_SLACK_IDS[member_name], 'notzona')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Wealize/set-profile-status-slack","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10443296626","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse,Http404\nfrom django.http import HttpResponseRedirect,JsonResponse\nfrom .models import *\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.core.exceptions import ObjectDoesNotExist\n#from .email import send_welcome_email\n\n\n\ndef index(request):\n locs = Other_loc.objects.all()\n rooms = Room.objects.all()\n buildings = Building.objects.all()\n events = 
Event.objects.all()\n context = {\n 'rooms':rooms,\n 'buildings':buildings,\n 'events' : events,\n 'locs':locs\n }\n return render(request,'index.html',context)\n\n@login_required(login_url='/accounts/login/')\ndef about_us(request):\n return render(request,'about.html')\n\ndef search_results(request):\n\n if 'email' in request.GET and request.GET[\"email\"]:\n email = request.GET.get(\"email\")\n user_loc = request.GET.get(\"currentloc\")\n source = Room.objects.get(name=user_loc)\n user_destination = request.GET.get(\"destination\")\n destination = Room.objects.get(name=user_destination)\n try:\n user_email = Location_Access.objects.get(user_email=email, location = destination)\n directions = Direction.objects.get(source = source, destination = destination)\n directions = directions\n return render(request, 'search.html',{\"directions\":directions})\n except ObjectDoesNotExist:\n message = f\"Invalid access key to {destination} room\"\n return render(request, 'search.html',{\"message\":message})\n\n elif 'currentloc' in request.GET and request.GET[\"currentloc\"]:\n user_loc = request.GET.get(\"currentloc\")\n source = Room.objects.get(name=user_loc)\n if 'destination' in request.GET and request.GET[\"destination\"]:\n user_destination = request.GET.get(\"destination\")\n destination = Room.objects.get(name=user_destination)\n if destination.accessible == False:\n message = f\"{destination} room is not accessible to the public\"\n return render(request, 'search.html',{\"message\":message, \"user_loc\":user_loc, \"destination\":destination})\n try:\n directions = Direction.objects.get(source = source, destination = destination)\n directions = directions\n return render(request, 'search.html',{\"directions\":directions})\n except ObjectDoesNotExist:\n message = \"There is no direction for the entered location\"\n return render(request, 'search.html',{\"message\":message, \"user_loc\":user_loc})\n\n elif 'event' in request.GET and request.GET[\"event\"]:\n event = request.GET.get(\"event\")\n event_obj = Event.objects.get(name = event)\n event_venue = event_obj.venue\n destination = Room.objects.get(name=event_venue)\n if destination.accessible == False:\n message = f\"{destination} room is not accessible to the public\"\n return render(request, 'search.html',{\"message\":message, \"user_loc\":user_loc,\"destination\":destination, \"event\":event_obj})\n directions = Direction.objects.get(source = source, destination = destination)\n directions = directions\n if directions:\n try:\n directions = directions\n return render(request, 'search.html',{\"directions\":directions, \"event\":event_obj})\n except ObjectDoesNotExist:\n message = \"There is no direction for the entered location\"\n return render(request, 'search.html',{\"message\":message, \"user_loc\":user_loc })\n \n\n \n else:\n return render(request, 'search.html',{\"user_loc\":user_loc})\n \n\n else:\n return render(request, 'search.html')\n\n ","repo_name":"demarillacizere/alumap","sub_path":"map/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6635133712","text":"from collections import deque\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\ndef bfs_water_jug(jug1_capacity, jug2_capacity, target_amount):\r\n\r\n queue = deque([(0, 0, [])])\r\n visited = set()\r\n visited.add((0, 0))\r\n tree = {}\r\n\r\n while queue:\r\n jug1_amount, jug2_amount, actions = queue.popleft()\r\n 
current_state = (jug1_amount, jug2_amount)\r\n\r\n if jug1_amount == target_amount or jug2_amount == target_amount:\r\n return tree, actions\r\n\r\n next_states = [\r\n (jug1_capacity, jug2_amount, actions + ['fill_jug1']),\r\n (jug1_amount, jug2_capacity, actions + ['fill_jug2']),\r\n (0, jug2_amount, actions + ['empty_jug1']),\r\n (jug1_amount, 0, actions + ['empty_jug2']),\r\n (jug1_amount - min(jug1_amount, jug2_capacity - jug2_amount),\r\n jug2_amount + min(jug1_amount, jug2_capacity - jug2_amount),\r\n actions + ['pour_jug1_to_jug2']),\r\n (jug1_amount + min(jug2_amount, jug1_capacity - jug1_amount),\r\n jug2_amount - min(jug2_amount, jug1_capacity - jug1_amount),\r\n actions + ['pour_jug2_to_jug1'])\r\n ]\r\n\r\n tree[current_state] = {}\r\n for next_jug1_amount, next_jug2_amount, next_actions in next_states:\r\n if (next_jug1_amount, next_jug2_amount) not in visited:\r\n queue.append((next_jug1_amount, next_jug2_amount, next_actions))\r\n visited.add((next_jug1_amount, next_jug2_amount))\r\n next_state = (next_jug1_amount, next_jug2_amount)\r\n tree[current_state][next_state] = next_actions[-1]\r\n\r\n return tree, []\r\n\r\ndef print_solution_tree(tree, node, level=0):\r\n if node in tree:\r\n print(\" \" * level + f\"{node}\")\r\n for child_node, action in tree[node].items():\r\n print(\" \" * (level + 1) + f\"{action} -> {child_node}\")\r\n print_solution_tree(tree, child_node, level + 2)\r\n\r\ndef draw_states(tree, path_taken):\r\n # Draw the graph manually and save as an image\r\n plt.figure(figsize=(12, 8))\r\n pos = {}\r\n\r\n for parent, children in tree.items():\r\n if parent not in pos:\r\n pos[parent] = (parent[0], parent[1])\r\n for child in children:\r\n if child not in pos:\r\n pos[child] = (child[0], child[1])\r\n if (parent, child) in path_taken or (child, parent) in path_taken:\r\n plt.plot([parent[0], child[0]], [parent[1], child[1]], 'r--', lw=2, label='Path Taken')\r\n else:\r\n plt.plot([parent[0], child[0]], [parent[1], child[1]], 'b-', lw=1)\r\n\r\n plt.scatter([pos[coord][0] for coord in pos], [pos[coord][1] for coord in pos], s=1000, c='lightblue', edgecolors='black', linewidths=1)\r\n plt.xlabel('Jug 1 (liters)')\r\n plt.ylabel('Jug 2 (liters)')\r\n plt.title(\"Water Jug Problem Solution Tree\")\r\n plt.legend()\r\n plt.grid(True)\r\n plt.axis('equal')\r\n\r\n # Save the graph as an image file\r\n plt.savefig('water_jug_solution.png')\r\n plt.show()\r\n\r\ndef get_user_input():\r\n try:\r\n jug1_capacity = int(input(\"Enter the capacity of Jug 1 (in liters): \"))\r\n jug2_capacity = int(input(\"Enter the capacity of Jug 2 (in liters): \"))\r\n target_amount = int(input(\"Enter the target amount (in liters): \"))\r\n return jug1_capacity, jug2_capacity, target_amount\r\n except ValueError:\r\n print(\"Invalid input. Please enter valid integers.\")\r\n return get_user_input()\r\n\r\n\r\n# Example Usage:\r\nprint(\"Water Jug Problem Solver\")\r\njug1_capacity, jug2_capacity, target_amount = get_user_input()\r\n\r\nsolution_tree, actions = bfs_water_jug(jug1_capacity, jug2_capacity, target_amount)\r\n\r\nprint(\"\\nSolution Path:\")\r\nfor i, action in enumerate(actions):\r\n print(f\"{i + 1}. 
{action}\")\r\n\r\nprint(\"\\nSolution Tree:\")\r\nprint_solution_tree(solution_tree, (0, 0))","repo_name":"Arnav-arw/PracticalLab-Sem5th","sub_path":"AISC/Lab 1/Lab1_WaterJugGraph.py","file_name":"Lab1_WaterJugGraph.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"9232046266","text":"import csv \n \nfilename = \"aapl.csv\"\n \nfields = [] \nrows = [] \n \nwith open(filename, 'r') as csvfile: \n csvreader = csv.reader(csvfile) \n \n fields = next(csvreader) \n \n for row in csvreader: \n rows.append(row) \n \n print(\"Total no. of rows: %d\"%(csvreader.line_num)) \n \n","repo_name":"pnjha/Text_Summarization","sub_path":"data/data_vis.py","file_name":"data_vis.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70859228942","text":"import argparse\nimport numpy as np\nimport matplotlib\nimport sys\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\ndef main(args):\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', dest='name', type=str, default='')\n parser.add_argument('--plot', dest='plot', type=bool, default=True)\n args = parser.parse_args()\n name = args.name\n training_episode = np.load('result/{}_training_ep.npy'.format(name))\n reward_mean= np.load('result/{}_reward_mean.npy'.format(name))\n reward_error= np.load('result/{}_reward_std.npy'.format(name))\n test_accuracy = np.load('result/{}_test_acc.npy'.format(name))\n\n print(training_episode)\n print(reward_mean)\n print(reward_error)\n print(test_accuracy)\n\n for i in range(len(reward_mean)):\n if reward_mean[i] >= 200:\n print(i)\n print(reward_mean[i])\n break\n\n max_acc = -float('Inf')\n max_idx = -1\n for i, score in enumerate(test_accuracy):\n if score > max_acc:\n max_acc = score\n max_idx = i\n print(max_idx)\n print(max_acc)\n print(test_accuracy[max_idx])\n\n\n if args.plot:\n # plt.errorbar(training_episode, reward_mean, reward_error)\n # plt.xlabel('Training Episode')\n # plt.ylabel('Cumulative Reward')\n # plt.savefig('plt/{}_reward.png'.format(name))\n plt.plot(training_episode, test_accuracy)\n plt.xlabel('Training Episode')\n plt.ylabel('test accuracy')\n plt.savefig('plt/{}_acc.png'.format(name))\n\n plt.gcf().clear()\n\n plt.plot(training_episode, reward_mean)\n plt.xlabel('Training Episode')\n plt.ylabel('test mean reward')\n plt.savefig('plt/{}_reward.png'.format(name))\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"yimingw2/10703_project","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32735202892","text":"from random import randint\n\n\n\nclass Board :\n #initializing the board using a list\n def __init__(self):\n self.squares = [' '] * 10\n\n #printing the squares of board by list index\n def printBoard(self):\n print('')\n print (self.squares[1], ' |', self.squares[2], ' |', self.squares[3])\n print('------------')\n print (self.squares[4], ' |', self.squares[5], ' |', self.squares[6])\n print('------------')\n print (self.squares[7], ' |', self.squares[8], ' |', self.squares[9])\n\n\n def updateBoard(self,square_number, move):\n if self.squares[square_number] == ' ': \n self.squares[square_number] = move\n \n def checkForMove(self, char,s1,s2,s3):\n if self.squares[s1] == char and 
self.squares[s2] == char and self.squares[s3] == char:\n            return True\n\n    def checkWins(self,char):\n        # rows\n        if self.checkForMove(char, 1,2,3):\n            return True\n        if self.checkForMove(char, 4,5,6):\n            return True\n        if self.checkForMove(char, 7,8,9):\n            return True\n        # columns\n        if self.checkForMove(char, 1,4,7):\n            return True\n        if self.checkForMove(char, 2,5,8):\n            return True\n        if self.checkForMove(char, 3,6,9):\n            return True\n        # diagonals\n        if self.checkForMove(char, 1,5,9):\n            return True\n        if self.checkForMove(char, 3,5,7):\n            return True \n\n        for i in self.squares[1:]:  # index 0 of the board list is never used\n            if i == ' ':  # an empty square remains, so the game is not over\n                return False\n        else:\n            print('Try again')\n            self.printBoard()\n\n    \n\n\nclass Game:\n\n    def welcome(self):\n        print('Welcome to TIC TAC TOE')#header\n\n    def refresh(self): #self refers to Game class\n        import os  # os is not imported at the top of the file\n        os.system('clear')\n        self.welcome()\n        b = Board()#instance of the Board Class\n        b.printBoard() \n\ndef main():\n\n    b = Board()\n    g = Game()\n\n    g.refresh()\n    while True:\n        \n        your_move = int(input('\\n[X] Choose between 1-9 >>> '))\n        b.updateBoard(your_move, 'X')\n        b.printBoard()\n        \n        print('[O] COMPUTER\\'S TURN')\n        computer_move = randint(1,9)  # board squares are numbered 1-9\n        b.updateBoard(computer_move, 'O')\n        b.printBoard()\n        \n        if b.checkWins('O') == True:\n            print('COMPUTER WINS!')\n            break\n        elif b.checkWins('X')== True:\n            print('YOU WIN!')\n            break\n    \n    \n    \nif __name__ == \"__main__\":\n    main()","repo_name":"vanessaLatefa/tictactoe","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21986576210","text":"print(\"Given two whole numbers, returns their product only if it is greater than 1000, otherwise \"\r\n      \"returns their sum.\\n\")\r\nnumber_one = int(input(\"Enter the first number: \"))\r\nnumber_two = int(input(\"Enter the second number: \"))\r\nmultiplication = number_one * number_two\r\ntotal = number_one + number_two\r\nif multiplication > 1000:\r\n    print(\"The product is greater than 1000. Their product is: \" + str(multiplication))\r\nelif multiplication < 1000:\r\n    print(\"The product is less than 1000. Their sum is: \" + str(total))\r\nelse:\r\n    print(\"The product is equal to 1000. 
Their sum is: \" + str(total))\r\n","repo_name":"Yamir-Haidar/Python","sub_path":"Ejercicio_1.py","file_name":"Ejercicio_1.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3346113613","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Michael Liao'\n\n'''\nRemote management.\n'''\n\nimport os\n\nfrom datetime import datetime\nfrom fabric.api import *\n\nenv.user = 'root'\nenv.hosts = ['a.shi-ci.com']\n\ndef _current_path():\n    return os.path.abspath('.')\n\n#####################\n# search.shi-ci.com #\n#####################\n\n_SEARCH_TAR_FILE = 'search.shi-ci.com.tar.gz'\n\ndef build_search():\n    lpath = os.path.join(_current_path(), 'search.shi-ci.com', 'web')\n    lfile = os.path.join(_current_path(), _SEARCH_TAR_FILE)\n    with lcd(lpath):\n        local('rm -f %s' % lfile)\n        local('tar --dereference -czvf %s WEB-INF' % lfile)\n\n_REMOTE_SEARCH_TMP_TAR = '/tmp/%s' % _SEARCH_TAR_FILE\n_REMOTE_SEARCH_DIST_LINK = '/srv/search.shi-ci.com/www'\n_REMOTE_SEARCH_DIST_DIR = '/srv/search.shi-ci.com/www-%s' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')\n\ndef scp_search():\n    run('rm -f %s' % _REMOTE_SEARCH_TMP_TAR)\n    put(os.path.join(_current_path(), _SEARCH_TAR_FILE), _REMOTE_SEARCH_TMP_TAR)\n    run('mkdir %s' % _REMOTE_SEARCH_DIST_DIR)\n    with cd(_REMOTE_SEARCH_DIST_DIR):\n        run('tar -xzvf %s' % _REMOTE_SEARCH_TMP_TAR)\n    run('chown -R jetty:jetty %s' % _REMOTE_SEARCH_DIST_DIR)\n    run('rm -f %s' % _REMOTE_SEARCH_DIST_LINK)\n    run('ln -s %s %s' % (_REMOTE_SEARCH_DIST_DIR, _REMOTE_SEARCH_DIST_LINK))\n    run('chown jetty:jetty %s' % _REMOTE_SEARCH_DIST_LINK)\n    with settings(warn_only=True):\n        run('/etc/init.d/jetty stop')\n    run('/etc/init.d/jetty start')\n\n##################\n# www.shi-ci.com #\n##################\n\n_WWW_TAR_FILE = 'www.shi-ci.com.tar.gz'\n\ndef build_www():\n    def _exclude(fname):\n        return fname.startswith('.') or fname.endswith('.pyc') or fname.endswith('.pyo') or fname.endswith('.gz')\n    lpath = os.path.join(_current_path(), 'www.shi-ci.com')\n    lfile = os.path.join(_current_path(), _WWW_TAR_FILE)\n    with lcd(lpath):\n        files = os.listdir(lpath)\n        includes = [f for f in files if not _exclude(f)]\n        excludes = ['.*', '*.pyc', '*.pyo', '*.psd']\n        local('rm -f %s' % lfile)\n        cmd = ['tar', '--dereference', '-czvf', lfile]\n        cmd.extend(['--exclude=\\'%s\\'' % ex for ex in excludes])\n        cmd.extend(includes)\n        local(' '.join(cmd))\n\n_REMOTE_WWW_TMP_TAR = '/tmp/%s' % _WWW_TAR_FILE\n_REMOTE_WWW_DIST_LINK = '/srv/www.shi-ci.com/www'\n_REMOTE_WWW_DIST_DIR = '/srv/www.shi-ci.com/www-%s' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')\n\ndef scp_www():\n    run('rm -f %s' % _REMOTE_WWW_TMP_TAR)\n    put(os.path.join(_current_path(), _WWW_TAR_FILE), _REMOTE_WWW_TMP_TAR)\n    run('mkdir %s' % _REMOTE_WWW_DIST_DIR)\n    with cd(_REMOTE_WWW_DIST_DIR):\n        run('tar -xzvf %s' % _REMOTE_WWW_TMP_TAR)\n    run('chown -R www-data:www-data %s' % _REMOTE_WWW_DIST_DIR)\n    run('rm -f %s' % _REMOTE_WWW_DIST_LINK)\n    run('ln -s %s %s' % (_REMOTE_WWW_DIST_DIR, _REMOTE_WWW_DIST_LINK))\n    run('chown www-data:www-data %s' % _REMOTE_WWW_DIST_LINK)\n    with settings(warn_only=True):\n        run('supervisorctl stop shici')\n    run('supervisorctl start shici')\n","repo_name":"wangyudi/shi-ci","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13114999251","text":"try: # used to check that 
the entered data is valid\n    a = int(input())  # read the value of a\n    if a<0:  # check whether the value is negative\n        print(\"An invalid value was entered\")\n    else:\n        print((a%10) * 100 + (a%100//10) * 10 + (a//100))  # split the entered value into digits and print them in the order we need (reversed) \nexcept:\n    print(\"Error\") \n    \n    # Added the comments on 12.11.22 at 00:13 (I forgot about them at first, sorry. The PZ publication date can be checked via the init file)\n","repo_name":"LevMitkov/Mitkov1","sub_path":"PZ_2/PZ_2_1.py","file_name":"PZ_2_1.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20766394898","text":"\"\"\"\nModule containing utility functions for PLOUF.\n\nAttributes:\n    DEFAULT_NODE_COLOR (hou.Color): Default node color from Houdini (light-gray).\n    FRAME_SEQUENCE_REGEX (str): Regex: matches the last sequence of digits in a string.\n        Used to assert the frame number in a filename.\n    PLOUF_PROJECT_ENV (str): Houdini Environment variable name of the project name. (Usually set in the plouf_env.json)\n    PLOUF_ROOT_ENV (str): Houdini Environment variable name pointing to the project root path. (Usually set in the plouf_env.json)\n    PUBLISHED_COLOR (hou.Color): Color used on published node that will overwrite an existing publish.\n    BLACKLISTED_WORDS (list): List of words (str) blacklisted by the syncro tool (Resilio).\n    BLACKLISTED_WORDS_REPLACEMENT (list): List of words (str) to replace blacklisted words in BLACKLISTED_WORDS when auto resolving those. Indexes should match between the two lists.\n    HIP_LIC (dict): Key: hou.licenseCategoryType object, Value: Type of hipfile (ie: 'hip', 'hipnc', 'hiplc')\n\"\"\"\n\n\n# Imports\n\nimport os\nimport re\nimport hou\nimport subprocess\nfrom collections import defaultdict\n\n\n# Constants\n\nFRAME_SEQUENCE_REGEX = r\"\\d+(?!.*\\d+)\"\n\nPUBLISHED_COLOR = hou.Color((0.74, 0.45, 0.91))\nDEFAULT_NODE_COLOR = hou.Color((0.84, 0.84, 0.84))\n\nPLOUF_ROOT_ENV = 'PLOUF_ROOT'\nPLOUF_PROJECT_ENV = 'PLOUF_PROJECT'\n\nBLACKLISTED_WORDS = ['work']\nBLACKLISTED_WORDS_REPLACEMENT = ['wrk']\n\nHIP_LIC = {\n    hou.licenseCategoryType.Education: 'hipnc',\n    hou.licenseCategoryType.ApprenticeHD: 'hipnc',\n    hou.licenseCategoryType.Apprentice: 'hipnc',\n    hou.licenseCategoryType.Indie: 'hiplc',\n    hou.licenseCategoryType.Commercial: 'hip'\n}\n\n\n# Functions\n\ndef getRoot() -> str:\n    \"\"\"\n    Gets the root path of the PLOUF project from houdini env variable.\n    Uses PLOUF_ROOT_ENV const.\n\n    Returns:\n        str: Path of the project root.\n    \"\"\"\n    return hou.getenv(PLOUF_ROOT_ENV)\n\n\ndef getProject() -> str:\n    \"\"\"\n    Gets the project name of the PLOUF project from houdini env variable.\n    Uses PLOUF_PROJECT_ENV const.\n\n    Returns:\n        str: Project name.\n    \"\"\"\n    return hou.getenv(PLOUF_PROJECT_ENV)\n\n\ndef setRoot(path: str):\n    \"\"\"\n    Sets the PLOUF root path environment variable.\n\n    Args:\n        path (str): Root path to set.\n    \"\"\"\n    hou.putenv(PLOUF_ROOT_ENV, path)\n\n\ndef setProject(name: str):\n    \"\"\"\n    Sets the PLOUF project name environment variable.\n\n    Args:\n        name (str): Project name to set.\n    \"\"\"\n    hou.putenv(PLOUF_PROJECT_ENV, name)\n\n\ndef explore(path: str):\n    \"\"\"\n    Opens the windows explorer at the specified path (if the path is valid).\n\n    Args:\n        path (str): Path at which the explorer will open.\n    \"\"\"\n    if os.path.isdir(path):\n        path = os.path.normpath(path)\n        subprocess.Popen(\"explorer \" + path)\n\n\ndef formatString(string: str) -> 
str:\n    \"\"\"\n    Formats or \"sanitizes\" a string by replacing any illegal character with '_'. Also makes it lowercase.\n\n    Args:\n        string (str): The string to format.\n\n    Returns:\n        str: The formatted string.\n    \"\"\"\n    string = string.lower()\n    string = hou.text.variableName(string)\n\n    return string\n\n\ndef formatPath(path: str, collaspeVariables=list()) -> str:\n    \"\"\"\n    Formats or \"sanitizes\" a path.\n    Can also collapse variables (hou.text.collapseCommonVars()).\n\n    Args:\n        path (str): Path to format\n        collaspeVariables (list, optional): List of houdini variables (str) (ie: ['$HIP']) to collapse.\n\n    Returns:\n        str: formatted path\n    \"\"\"\n    formatted_path = hou.text.normpath(path)\n    formatted_path = hou.text.collapseCommonVars(formatted_path, vars=collaspeVariables)\n\n    return formatted_path\n\n\ndef formatFileList(files: list, collapseSequences=True, collaspeVariables=list()) -> list:\n    \"\"\"\n    Formats a list of file paths by removing duplicates, sorting it and normalizing the paths.\n    Optionally it can collapse file sequences into this format: 'path/to/file[01-99].jpg'\n    Can also collapse variables (hou.text.collapseCommonVars()).\n\n    Args:\n        files (list): List of files path (str) to operate on.\n        collapseSequences (bool, optional): If True collapse file sequences. Default is True\n        collaspeVariables (list, optional): List of houdini variables (str) (ie: ['$HIP']) to collapse.\n\n    Returns:\n        list: Formatted list of file paths.\n    \"\"\"\n    files = list(dict.fromkeys(files))  # Remove duplicates\n    files.sort()\n\n    if all(isinstance(item, str) for item in files):\n\n        for index, file in enumerate(files):\n            # file = file.replace('\\\\', '/') Find a way to remove backslashes\n            files[index] = formatPath(file, collaspeVariables)\n\n        if collapseSequences:\n            formated_files = list()\n            sequences = defaultdict(list)\n\n            for file in files:\n                dirname = os.path.dirname(file)\n                filename = os.path.basename(file)\n\n                match = re.findall(FRAME_SEQUENCE_REGEX, filename)\n\n                if not match:\n                    sequences[file] = list()\n                    continue\n\n                else:\n                    frame = match[-1]\n                    filename = re.sub(FRAME_SEQUENCE_REGEX, '{}', filename)\n                    sequence = os.path.join(dirname, filename)\n                    sequences[sequence].append(frame)\n\n            for sequence, frames in sequences.items():\n                if not frames:\n                    formated_files.append(sequence)\n                    continue\n\n                frames.sort()\n                first_frame = frames[0]\n                last_frame = frames[-1]\n\n                if first_frame == last_frame:\n                    frame_range = first_frame\n\n                else:\n                    frame_range = f'[{first_frame}-{last_frame}]'\n\n                formated_sequence = sequence.format(frame_range)\n\n                formated_files.append(formated_sequence)\n\n            return formated_files\n    return files\n\n\ndef menuFromDir(path: str) -> list:\n    \"\"\"\n    Creates a list of valid directories found in path.\n    Each directory found is written twice in the resulting list.\n    This is made for creating Python driven menu scripts in Houdini HDAs interface.\n\n    Args:\n        path (str): The path to look for directories\n\n    Returns:\n        list: The list of directories (doubled) in path.\n    \"\"\"\n    menuitem = list()\n\n    if os.path.isdir(path):\n        dirs = os.listdir(path)\n        for d in dirs:\n            if not d.startswith(\".\"):\n                if os.path.isdir(os.path.join(path, d)):\n                    menuitem.append(d)\n                    menuitem.append(d)\n\n    menuitem.sort()\n    return menuitem\n\n\ndef setColorState(node: hou.Node, publish):\n    \"\"\"\n    Changes the Node's color depending on whether the publish files exist or not.\n\n    Args:\n        node (hou.Node): Node to change the color of.\n        publish (plouf.Publish): Publish object.\n    \"\"\"\n    if publish.isPublished():\n
node.setColor(PUBLISHED_COLOR)\n\n    else:\n        node.setColor(DEFAULT_NODE_COLOR)\n\n\ndef assetPublishState(path: str, root=getRoot()) -> tuple:\n    \"\"\"\n    Checks if an asset at the specified path exists, and if it is published: available in the pipeline (In the root path, and doesn't contain blacklisted words).\n\n    Args:\n        path (str): Asset path\n        root (str, optional): Pipeline/Project root, default to getRoot() value\n\n    Returns:\n        tuple: a tuple containing a bool (True: the asset is published, False: it is not)\n        and a message (str) as why the asset is not published.\n    \"\"\"\n    path = path.lower()\n    root = root.lower()\n    if path.startswith('op:'):\n        reason_msg = \"Internal OpPath, disregards\"\n        state = True\n\n    elif path.startswith('anon:'):\n        reason_msg = \"Anonymous Layer, disregards\"\n        state = True\n\n    elif '<udim>' in path or '<UDIM>' in path:\n        reason_msg = \"Exception : UDIM Tags\"\n        state = False\n\n    elif os.path.isfile(path):\n        path = os.path.abspath(path)\n        root = os.path.abspath(root)\n\n        if not path.startswith(root):\n            reason_msg = \"Asset not in project root\"\n            state = False\n\n        elif any(word in path.lower() for word in BLACKLISTED_WORDS):\n            reason_msg = f\"Asset contains any of blacklisted words: {BLACKLISTED_WORDS}\"\n            state = False\n\n        else:\n            reason_msg = \"Asset is published\"\n            state = True\n\n    else:\n        reason_msg = \"Asset doesn't exist\"\n        state = False\n\n    return (state, reason_msg)\n\n\ndef resolveBlacklistedWords(string: str) -> str:\n    \"\"\"\n    Replaces blacklisted words by their replacements\n    See : BLACKLISTED_WORDS, BLACKLISTED_WORDS_REPLACEMENT constant descriptions.\n\n    Args:\n        string (str): String to process.\n\n    Returns:\n        str: String with eventual blacklisted words replaced.\n    \"\"\"\n    for word, replc in zip(BLACKLISTED_WORDS, BLACKLISTED_WORDS_REPLACEMENT):\n        string = string.replace(word, replc)\n\n    return string\n\n\ndef hipFileType() -> str:\n    \"\"\"\n    Returns the Hip extension depending on the license type.\n\n    Returns:\n        str: 'hip', 'hipnc', 'hiplc'\n    \"\"\"\n    license = hou.licenseCategory()\n\n    return HIP_LIC.get(license)\n","repo_name":"paul-charp/plouf","sub_path":"plouf/python3.7libs/plouf/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38939217140","text":"from django.shortcuts import render\nfrom django.urls import reverse\nfrom django import forms\nfrom django.http import HttpResponse, HttpResponseRedirect\ntasklist=[]\nclass NewTaskForm(forms.Form): # our NewTaskForm inherits from Form which is present in the forms module\n    task = forms.CharField(label= \"New Task\")\n    priority = forms.IntegerField(label=\"Priority\", min_value= 1, max_value =10)\n\n# Create your views here.\ndef index(request):\n    if \"tasklist\" not in request.session:\n        request.session[\"tasklist\"] = []\n    return render(request, \"tasks/index.html\", { \"tasks\" : request.session[\"tasklist\"]\n    }) \n#{\"html template variable which django will try to access\" , python variable }\n\n# Add a new task:\ndef add(request):\n    #check whether the method is post\n    if request.method == \"POST\":\n        #take in the data user submitted and save it as a form\n        form = NewTaskForm(request.POST)\n        #check if the form is valid (server-side)\n        if form.is_valid():\n            #isolate the task from the cleaned version of the form data\n            task = form.cleaned_data[\"task\"]\n            #add a new task to our task list\n            request.session[\"tasklist\"] += [task]\n            #Redirect the user to the list of tasks\n
return(HttpResponseRedirect(reverse(\"tasks:index\")))\n else:\n #if the form is invalid, then rerender the page with existing information\n return render(request, \"tasks/add.html\", { \"form\" : form}) #(a html template variable, python variable)\n else:\n return render(request, \"tasks/add.html\", {\"form\": NewTaskForm()})","repo_name":"QuietkidAniket/harvardcs50webdevcoursework","sub_path":"djangodemo/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25886374267","text":"from dataclasses import dataclass\nimport numpy as np\nfrom transformers import PreTrainedTokenizerBase\nfrom transformers.tokenization_utils_base import BatchEncoding\nfrom numpy.typing import NDArray\nfrom typing import Any, Dict, List, Tuple\n\n\ndef preprocess_cpmbee(example, prompter, tokenizer, options):\n #data = {\"prompt\": example[\"instruction\"], \"input\": example[\"input\"], \"\": example[\"output\"]}\n data = {\"input\": example[\"instruction\"]+ \"\\n\" + example[\"input\"], \"\": example[\"output\"]}\n raw_data = {}\n (\n input_ids,\n input_id_subs,\n context,\n segment_ids,\n segment_rel,\n n_segments,\n _\n ) = tokenizer.convert_data_to_id(data)\n input_ids = input_ids[: options.cutoff_len]\n input_id_subs = input_id_subs[: options.cutoff_len]\n context = context[: options.cutoff_len]\n segment_ids = segment_ids[: options.cutoff_len]\n raw_data[\"input\"] = data\n raw_data[\"samples\"] = []\n sample_ids = np.zeros(input_ids.shape, dtype=np.int32)\n segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)\n num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)\n\n return {\"input_ids\": input_ids, \"inputs_sub\": input_id_subs, \"context\": context, \"sample_ids\": sample_ids, \"segments\": segment_ids, \"num_segments\": num_segments, \"segment_rel_offset\": segment_rel_offset, \"segment_rel\": segment_rel, \"spans\": [input_ids.shape[0]], \"raw_data\": raw_data}\n\n\ndef coll_fn_cpmbee(stage = \"sft\"):\n return preprocess_cpmbee\n\n\n\n@dataclass\nclass DataCollatorForCPMBEE:\n tokenizer: PreTrainedTokenizerBase\n max_length: int\n\n def __call__(self, features):\n _inputs: List[NDArray[np.int32]] = []\n _inputs_sub: List[NDArray[np.int32]] = []\n _context: List[NDArray[np.int8]] = []\n _sample_ids: List[NDArray[np.int32]] = []\n _segments: List[NDArray[np.int32]] = []\n _num_segments: List[NDArray[np.int32]] = []\n _segment_rel_offset: List[NDArray[np.int32]] = []\n _segment_rel: List[NDArray[np.int32]] = []\n _spans: List[List[int]] = []\n _raw_data: List[List[Any]] = []\n\n for feature in features:\n _inputs.append(np.array(feature[\"input_ids\"], dtype=np.int32))\n _inputs_sub.append(np.array(feature[\"inputs_sub\"], dtype=np.int32))\n _context.append(np.array(feature[\"context\"], dtype=np.int8))\n _sample_ids.append(np.array(feature[\"sample_ids\"], dtype=np.int32))\n _segments.append(np.array(feature[\"segments\"], dtype=np.int32))\n _num_segments.append(np.array(feature[\"num_segments\"], dtype=np.int32))\n _segment_rel_offset.append(np.array(feature[\"segment_rel_offset\"], dtype=np.int32))\n _segment_rel.append(np.array(feature[\"segment_rel\"], dtype=np.int32))\n _spans.append(feature[\"spans\"])\n _raw_data.append(feature[\"raw_data\"])\n\n batch_size = len(_inputs)\n inputs = np.zeros((batch_size, self.max_length), dtype=np.int32)\n inputs_sub = np.zeros((batch_size, self.max_length), dtype=np.int32)\n context = 
np.zeros((batch_size, self.max_length), dtype=np.int8)\n sample_ids = np.zeros((batch_size, self.max_length), dtype=np.int32)\n segments = np.zeros((batch_size, self.max_length), dtype=np.int32)\n num_segments = np.zeros((batch_size, self.max_length), dtype=np.int32)\n segment_rel_offset = np.zeros((batch_size, self.max_length), dtype=np.int32)\n tgt = np.full((batch_size, self.max_length), -100, dtype=np.int32)\n\n max_rel = 0\n for i in range(batch_size):\n max_rel = max(max_rel, _segment_rel[i].shape[0])\n segment_rel = np.zeros((batch_size, max_rel), dtype=np.int32)\n spans = np.zeros((batch_size, self.max_length), dtype=np.int32)\n length = np.zeros((batch_size,), dtype=np.int32)\n\n batch_ext_table_map: Dict[Tuple[int, int], int] = {}\n batch_ext_table_ids: List[int] = []\n batch_ext_table_sub: List[int] = []\n raw_data_list: List[Any] = []\n\n for i in range(batch_size):\n instance_length = _inputs[i].shape[0]\n rel_size = _segment_rel[i].shape[0]\n inputs[i, :instance_length] = _inputs[i]\n inputs_sub[i, :instance_length] = _inputs_sub[i]\n context[i, :instance_length] = _context[i]\n sample_ids[i, :instance_length] = _sample_ids[i]\n segments[i, :instance_length] = _segments[i]\n num_segments[i, :instance_length] = _num_segments[i]\n segment_rel_offset[i, :instance_length] = _segment_rel_offset[i]\n segment_rel[i, :rel_size] = _segment_rel[i]\n\n span_begin = 0\n for span_id, span_end in enumerate(_spans[i]):\n spans[i, span_begin:span_end] = span_id\n span_begin = span_end\n length[i] = instance_length\n raw_data_list.extend(_raw_data[i])\n\n for j in range(instance_length):\n idx, idx_sub = _inputs[i][j], _inputs_sub[i][j]\n tgt_idx = idx\n if idx_sub > 0:\n # need to be in ext table\n if (idx, idx_sub) not in batch_ext_table_map:\n batch_ext_table_map[(idx, idx_sub)] = len(batch_ext_table_map)\n batch_ext_table_ids.append(idx)\n batch_ext_table_sub.append(idx_sub)\n tgt_idx = batch_ext_table_map[(idx, idx_sub)] + self.tokenizer.vocab_size\n if j > 1 and context[i, j - 1] == 0:\n if idx != self.tokenizer.bos_token_id:\n tgt[i, j - 1] = tgt_idx\n else:\n tgt[i, j - 1] = self.tokenizer.eos_token_id\n if context[i, instance_length - 1] == 0:\n tgt[i, instance_length - 1] = self.tokenizer.eos_token_id\n \n if len(batch_ext_table_map) == 0:\n # placeholder\n batch_ext_table_ids.append(0)\n batch_ext_table_sub.append(1)\n\n return BatchEncoding({\n \"input_ids\": inputs,\n \"input_id_sub\": inputs_sub,\n \"length\": length,\n \"context\": context > 0,\n \"sample_ids\": sample_ids,\n \"num_segments\": num_segments,\n \"segment\": segments,\n \"segment_rel_offset\": segment_rel_offset,\n \"segment_rel\": segment_rel,\n \"span\": spans,\n \"labels\": tgt,\n \"ext_table_ids\": np.array(batch_ext_table_ids, dtype=np.int32),\n \"ext_table_sub\": np.array(batch_ext_table_sub, dtype=np.int32)\n }, tensor_type=\"pt\")\n\n","repo_name":"zjunlp/DeepKE","sub_path":"example/llm/InstructKGC/src/datamodule/cpmbee.py","file_name":"cpmbee.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","stars":2490,"dataset":"github-code","pt":"47"} +{"seq_id":"27714958172","text":"from helper import *\nfrom config import hyperparams\nimport os\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\nfrom time import sleep\nimport re\nimport numpy as np\nimport time\nimport json\nfrom glob import glob\nimport pickle\nfrom tqdm import tqdm, trange\nimport wandb\n# import click\nimport argparse\nimport 
io\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nnpy_dir, EPOCHS, sample_size, BATCH_SIZE, BUFFER_SIZE, embedding_dim, units, top_k, features_shape, attention_features_shape, cpt, wb, npy = hyperparams()\n\n# print(BATCH_SIZE)\n# @click.command()\n# @click.option('--batch_size', default=BATCH_SIZE)\n# @click.option('--buffer_size', default=BUFFER_SIZE)\n# @click.option('--embed_dim', default=embedding_dim)\n# @click.option('--epochs', default=EPOCHS)\n# @click.option('--unitss', default=units)\n# def hello(batch_size: int, buffer_size: int, embed_dim: int, epochs: int, unitss: int):\n# BATCH_SIZE = batch_size\n# BUFFER_SIZE = buffer_size\n# embedding_dim = embed_dim\n# EPOCHS = epochs\n# units = unitss\n# method()\n# print('here')\n\n\n\nap = argparse.ArgumentParser()\nap.add_argument('--batch_size', type=int, default=BATCH_SIZE)\nap.add_argument('--buffer_size', type=int, default=BUFFER_SIZE)\nap.add_argument('--epochs', type=int, default=EPOCHS)\nap.add_argument('--ckpt', default=\"./checkpoints\")\nargs = vars(ap.parse_args())\n\ncheckpoint_path = os.path.join(args['ckpt'], 'train/')\n\nBATCH_SIZE, BUFFER_SIZE, EPOCHS = args['batch_size'], args['buffer_size'], args['epochs']\n\nvocab_size = top_k + 1\nif wb:\n    config={'batch_size' : BATCH_SIZE,\n            'epochs' : EPOCHS,\n            'buffer_size' : BUFFER_SIZE\n        }\n    wandb.init(project=\"azure-captioning\", sync_tensorboard=True, config=config)\n\n\n\nannotation_file = 'annotations/captions_train2014.json'\nPATH = 'train2014/'\n\n########################################------1------# pre-steps\n# Read the json file\nwith open(annotation_file, 'r') as f:\n    annotations = json.load(f)\n\n# Store captions and image names in vectors\nall_captions = []\nall_img_name_vector = []\n\nfor annot in annotations['annotations']:\n    caption = '<start> ' + annot['caption'] + ' <end>'\n    image_id = annot['image_id']\n    full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)\n\n    all_img_name_vector.append(full_coco_image_path)\n    all_captions.append(caption)\n\n# Shuffle captions and image_names together\n# Set a random state\ntrain_captions, img_name_vector = shuffle(all_captions,\n                                          all_img_name_vector,\n                                          random_state=1)\n\ntrain_captions = train_captions[:sample_size]\nimg_name_vector = img_name_vector[:sample_size]\n\ndel all_captions\ndel all_img_name_vector\n\nprint(\"DON'T FORGET TO MOUNT NPY FILES DRIVE!!\")\nprint('1')\n########################################------2------# pretrained model - Inception V3\nimage_model = tf.keras.applications.InceptionV3(include_top=False,\n                                                weights='imagenet')\nnew_input = image_model.input\nhidden_layer = image_model.layers[-1].output\n\nimage_features_extract_model = tf.keras.Model(new_input, hidden_layer)\n\n\nprint('2')\n########################################------3------# caching features\n# Get unique images\nencode_train = sorted(set(img_name_vector))\n\n# Feel free to change batch_size according to your system configuration\nimage_dataset = tf.data.Dataset.from_tensor_slices(encode_train)\nimage_dataset = image_dataset.map(\n  load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(128)\n\nif npy:\n    i = 0\n    for img, path in tqdm(image_dataset):\n        i += 1\n        if i % 500 == 0: sleep(60)\n        batch_features = image_features_extract_model(img)\n        batch_features = tf.reshape(batch_features,\n                                    (batch_features.shape[0], -1, batch_features.shape[3]))\n\n        for bf, p in zip(batch_features, path):\n            path_of_feature = p.numpy().decode(\"utf-8\")\n            path_of_feature = os.path.join(npy_dir, os.path.basename(path_of_feature))\n            if 
not os.path.isfile(path_of_feature+'.npy'):\n                np.save(path_of_feature, bf.numpy())\n\n    del i\n    exit()\n\n\n# for _,i in enumerate(img_name_vector):\n#     if _ == 1: break\n#     print(np.array(img_name_vector))\n# if np.array_equal(a, np.array(img_name_vector)[1]): print('ok')\n# exit()\n\n\n\n\n########################################------4------# preprocessing captions\nif not os.path.isfile('tokenizer.json'):\n# # Choose the top 5000 words from the vocabulary\n    tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k,\n                                                    oov_token=\"<unk>\",\n                                                    filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~ ')\n    tokenizer.fit_on_texts(train_captions)\n    train_seqs = tokenizer.texts_to_sequences(train_captions)\n\n    tokenizer.word_index['<pad>'] = 0\n    tokenizer.index_word[0] = '<pad>'\n\n    # saving tokenizer\n    # with open('tokenizer.pickle', 'wb') as handle:\n    #     pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    tokenizer_json = tokenizer.to_json()\n\n    with io.open('tokenizer.json', 'w', encoding='utf-8') as f:\n        f.write(json.dumps(tokenizer_json, ensure_ascii=False))\n\nelse: # load the tokenizer file\n    with open('tokenizer.json') as f:\n        datax = json.load(f)\n        tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(datax)\n    del datax\n\n\n# Create the tokenized vectors\ntrain_seqs = tokenizer.texts_to_sequences(train_captions)\n\n# Pad each vector to the max_length of the captions\n# If you do not provide a max_length value, pad_sequences calculates it automatically\ncap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')\n\n# Calculates the max_length, which is used to store the attention weights\nmax_length = calc_max_length(train_seqs)\n\n\nprint('3')\n########################################------5------# split data\n# Create training and validation sets using an 80-20 split\nimg_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector,\n                                                                    cap_vector,\n                                                                    test_size=0.2,\n                                                                    random_state=0)\n\n\n\n########################################------6------# create tf.dataset\ndataset = tfdataset(img_name_train, cap_train)\n\n\n########################################------7------# model\nencoder = CNN_Encoder(embedding_dim)\ndecoder = RNN_Decoder(embedding_dim, units, vocab_size)\n\noptimizer = tf.keras.optimizers.Adam()\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n    from_logits=True, reduction='none')\n\ndef loss_function(real, pred):\n    mask = tf.math.logical_not(tf.math.equal(real, 0))\n    loss_ = loss_object(real, pred)\n\n    mask = tf.cast(mask, dtype=loss_.dtype)\n    loss_ *= mask\n\n    return tf.reduce_mean(loss_)\n\n\n########################################------8------# checkpoint\nstart_epoch = 0\nif cpt:\n    ckpt = tf.train.Checkpoint(encoder=encoder,\n                            decoder=decoder,\n                            optimizer = optimizer)\n    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n\n    \n    if ckpt_manager.latest_checkpoint:\n        start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])\n        # restoring the latest checkpoint in checkpoint_path\n        ckpt.restore(ckpt_manager.latest_checkpoint)\n\n\n\n\n\n\n\n########################################------8------# train step\n@tf.function\ndef train_step(img_tensor, target):\n    loss = 0\n\n    # initializing the hidden state for each batch\n    # because the captions are not related from image to image\n    hidden = decoder.reset_state(batch_size=target.shape[0])\n\n    dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * target.shape[0], 1)\n\n    with tf.GradientTape() as tape:\n        features = encoder(img_tensor)\n\n        
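# (editor's note) decode one step per target token: position 0 holds the <start>\n        # marker, so the loop below starts at 1; via the teacher forcing further down,\n        # each ground-truth token is fed back in as the next decoder input.\n        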
for i in range(1, target.shape[1]):\n # passing the features through the decoder\n predictions, hidden, _ = decoder(dec_input, features, hidden)\n\n loss += loss_function(target[:, i], predictions)\n\n # using teacher forcing\n dec_input = tf.expand_dims(target[:, i], 1)\n\n total_loss = (loss / int(target.shape[1]))\n\n trainable_variables = encoder.trainable_variables + decoder.trainable_variables\n\n gradients = tape.gradient(loss, trainable_variables)\n\n optimizer.apply_gradients(zip(gradients, trainable_variables))\n\n return loss, total_loss\n\n\n\n\n\n########################################------8------# TRAIN #-----------------#################################################\nnum_steps = len(img_name_train) // BATCH_SIZE\nprint('start training ..')\nfor epoch in range(EPOCHS):\n start = time.time()\n total_loss = 0\n\n for (batch, (img_tensor, target)) in tqdm(enumerate(dataset), ascii=True):\n batch_loss, t_loss = train_step(img_tensor, target)\n total_loss += t_loss\n\n if wb and batch % 10 == 0:\n wandb.log({'loss':batch_loss.numpy() / int(target.shape[1])})\n\n if (batch+1) % 100 == 0:\n print ('Epoch {}/Epochs {} Batch {} Loss {:.4f}'.format(\n epoch + 1, EPOCHS, batch, batch_loss.numpy() / int(target.shape[1])))\n sleep(20)\n\n if epoch % 5 == 0 and cpt:\n ckpt_manager.save()\n\n print ('Epoch {} Loss {:.6f}'.format(epoch + 1,\n total_loss/num_steps))\n\n print ('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))\n\n\n","repo_name":"kyteinsky/captain-caption","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9465769524","text":"from typing import Any, Dict, List, Optional\n\nfrom loguru import logger\nfrom pydantic import ValidationError\n\nfrom indi.model import WGSFileTreeMetadata, WGSMetadata, WGSObjectKey\n\n\nclass ExtractWGSFileTreeMetadata:\n def __init__(self) -> None:\n # List of all valid objects keys\n self.object_keys: List[WGSObjectKey] = []\n\n self.wgs_filetree_metadata: Optional[WGSFileTreeMetadata] = None\n\n def read_json(self, object_keys: Any) -> None:\n \"\"\"Read in the list of object keys,\n convert each to WGSObjectKey object for validation and store in a list\n\n Args:\n object_keys (Any): List of object keys\n\n Raises:\n ValueError: Raise error if the input is empty\n \"\"\"\n if not object_keys:\n raise ValueError(\"Empty list\")\n\n for object_key in object_keys:\n try:\n self.object_keys.append(WGSObjectKey(object_key=object_key))\n except ValidationError as err:\n logger.error(\n f\"\"\"Error for object key: {object_key}.\\nError: {err.errors()[0][\"msg\"]}\\nSkipping.\"\"\"\n )\n\n def _unique_object_keys(self) -> List[WGSObjectKey]:\n unique_object_keys = []\n object_key_dict: Dict[str, int] = {}\n for it, object_key in enumerate(self.object_keys):\n if object_key.object_key in object_key_dict:\n logger.error(\n f\"Object key {object_key.object_key} found on line {it} \"\n f\"already exists on line {object_key_dict[object_key.object_key]}.\"\n \"Skipping.\"\n )\n else:\n object_key_dict[object_key.object_key] = it\n unique_object_keys.append(object_key)\n\n return unique_object_keys\n\n def _object_key_to_metadata(\n self, object_key: WGSObjectKey\n ) -> Optional[WGSMetadata]:\n try:\n return WGSMetadata.parse_object_key(object_key)\n except ValidationError as err:\n logger.error(\n f\"\"\"Error for object key: {object_key}.\\nError: {err.errors()[0][\"msg\"]}\\nSkipping.\"\"\"\n )\n 
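# (editor's note) malformed object keys are logged and skipped here rather than aborting the batch\n            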
return None\n\n def _get_combined_metadata_for_sample_id(\n self, object_keys: List[WGSObjectKey]\n ) -> List[WGSMetadata]:\n \"\"\"\n Combining metadata from object keys to make sure\n 1 sample_id has 1 Metadata object\n \"\"\"\n\n # Dictionary to use for ensuring 1 sample_id has only 1 WGSMetadata object\n sample_id_to_metadata: Dict[str, WGSMetadata] = {}\n\n for object_key in object_keys:\n object_key_metadata = self._object_key_to_metadata(object_key)\n if object_key_metadata is None:\n continue\n\n sample_id = object_key_metadata.sample_id\n if sample_id not in sample_id_to_metadata:\n sample_id_to_metadata[sample_id] = object_key_metadata\n else:\n sample_id_to_metadata[sample_id].lanes.extend(object_key_metadata.lanes)\n\n return list(sample_id_to_metadata.values())\n\n def _sort_metadata_lanes(\n self, sample_id_metadata: List[WGSMetadata]\n ) -> List[WGSMetadata]:\n \"\"\"\n Sort lanes in metadata for better readability\n \"\"\"\n wgs_filetree_metadata: List[WGSMetadata] = []\n for metadata in sample_id_metadata:\n lanes = sorted(\n metadata.lanes,\n key=lambda lane: (\n lane.barcode,\n lane.marker_forward,\n lane.marker_reverse,\n lane.lane,\n ),\n )\n\n wgs_filetree_metadata.append(\n WGSMetadata(\n case_id=metadata.case_id,\n sample_label=metadata.sample_label,\n sample_id=metadata.sample_id,\n data_type=metadata.data_type,\n lanes=lanes,\n )\n )\n return wgs_filetree_metadata\n\n def extract_wgs_filetree_metadata(self) -> None:\n \"\"\"\n This is an orchestrator function to extract WGS metadata from the object keys.\n First we make sure to use only unique keys.\n Then, we convert object keys to metadata.\n Then, we combine metadata to make sure 1 sample_id has 1 Metadata object.\n Finally, we sort lanes for metadata for better readability.\n \"\"\"\n object_keys = self._unique_object_keys()\n\n # Dictionary to use for ensuring 1 sample_id has only 1 WGSMetadata object\n sample_id_metadata = self._get_combined_metadata_for_sample_id(object_keys)\n\n wgs_filetree_metadata = self._sort_metadata_lanes(sample_id_metadata)\n\n self.wgs_filetree_metadata = WGSFileTreeMetadata(\n filetree_metadata=wgs_filetree_metadata\n )\n\n def get_wgs_filetree_metadata(self) -> Any:\n \"\"\"Create json from the metadata ensuring correct names\n\n Returns:\n Any: json for the metadata\n\n Raises:\n ValueError: Raise error if metadata is None\n \"\"\"\n if self.wgs_filetree_metadata is not None:\n return self.wgs_filetree_metadata.model_dump(by_alias=True)[\n \"filetree_metadata\"\n ]\n else:\n raise ValueError(\"Filetree is None. 
Process it before getting data.\")\n","repo_name":"vikramsg/indi","sub_path":"indi/wgs_filetree_metadata.py","file_name":"wgs_filetree_metadata.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71410413262","text":"'''Copies document templates into project folders for the Lead In phase'''\n\nfrom os import environ as env\nfrom operator import eq, ne\nimport logging\nimport json\nimport sys\nfrom datetime import datetime\n\nimport fiscalyear\nfrom fiscalyear import FiscalDateTime\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\nfrom googleapiclient.errors import HttpError\nfrom pydrive.auth import GoogleAuth, AuthError\nfrom pydrive.drive import GoogleDrive\nfrom pydrive.files import ApiRequestError, FileNotUploadedError\nfrom pydrive.settings import InvalidConfigError\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.WARNING)\n\nGDRIVE_SNS_TOPIC_ARN = env.get('GDRIVE_SNS_TOPIC_ARN')\nSNS = boto3.client('sns')\nDDB = boto3.resource('dynamodb', region_name='us-east-1')\n\nfiscalyear.START_MONTH = 4\nFISCAL_YEAR = FiscalDateTime.now()\n\nclass WorthRetryingException(Exception):\n '''Base error class for exceptions worth retrying'''\n\n\nclass GDriveAuthError(WorthRetryingException):\n '''General authentication error'''\n # Worth retrying until we discover which errors are impossible to rectify\n\n\nclass TemporaryGlitch(WorthRetryingException):\n '''Idempotent Glitch error class'''\n\n\nclass SnsPublishError(Exception):\n '''SNS publish error'''\n\n\nclass GDriveBaseError(Exception):\n '''Base GDrive error'''\n\n\nclass GDriveFolderNotFoundError(WorthRetryingException):\n '''GDrive folder missing error'''\n\n\ndef init_auth(settings_file='settings.yaml'):\n '''Initialize GoogleDrive auth object'''\n try:\n gauth = GoogleAuth(\n settings_file=settings_file\n )\n gauth.ServiceAuth()\n except AuthError as erra:\n exc_info = sys.exc_info()\n raise GDriveAuthError(erra).with_traceback(exc_info[2])\n except InvalidConfigError as errc:\n exc_info = sys.exc_info()\n raise GDriveBaseError(errc).with_traceback(exc_info[2])\n\n return GoogleDrive(gauth)\n\n\ndef build_sns_message(message, copied_file_links, folder_ids=None):\n '''Construct SNS message and include info about the fields that were updated'''\n sns_message = {\n \"CustomerName\": message['CustomerName'],\n \"ProjectName\": message['ProjectName'],\n \"DealId\": message['DealId'],\n \"Territory\": message['Territory'],\n \"FolderIds\": folder_ids,\n \"CopiedFileLinks\": copied_file_links\n }\n return sns_message\n\n\ndef build_message_attributes(action, stage):\n '''Construct message attributes'''\n message_attributes = {\n 'component': {\n 'DataType': 'String',\n 'StringValue': 'gdrive'\n },\n 'action': {\n 'DataType': 'String',\n 'StringValue': action\n },\n 'stage': {\n 'DataType': 'String',\n 'StringValue': stage\n }\n }\n return message_attributes\n\n\ndef publish_sns_message(sns_topic_arn, message, attributes):\n '''Publish message to SNS topic'''\n print('SNS message: {}'.format(message))\n try:\n resp = SNS.publish(\n TopicArn=sns_topic_arn,\n Message=json.dumps(message),\n MessageAttributes=attributes\n )\n except ClientError as errc:\n exc_info = sys.exc_info()\n raise SnsPublishError(errc).with_traceback(exc_info[2])\n\n print('SNS Response: {}'.format(resp))\n return resp\n\n\ndef copy_file(drive, source_id, dest_title, parent_id):\n '''Copy an existing 
file'''\n copied_file = {\n 'title': dest_title,\n 'parents': [\n {\n 'id': parent_id\n }\n ]\n }\n try:\n file_data = drive.auth.service.files().copy(\n fileId=source_id, body=copied_file).execute()\n return drive.CreateFile({'id': file_data['id']})\n except HttpError as errh:\n raise errh\n except Exception as error:\n exc_info = sys.exc_info()\n raise Exception(error).with_traceback(exc_info[2])\n\n\ndef get_doc_template_ids(stage):\n '''Retrieve Doc Template IDs for this stage from DynamoDB'''\n doc_templates = {}\n table = DDB.Table('gdrive-doc-templates')\n\n try:\n response = table.query(\n KeyConditionExpression=Key('stage').eq(stage)\n )\n except ClientError as errc:\n exc_info = sys.exc_info()\n raise Exception(errc).with_traceback(exc_info[2])\n\n for i in response['Items']:\n doc_templates.update({i['tag'] : i['id']})\n return doc_templates\n\n\ndef get_folder_ids(message):\n '''Retrieves folder_ids dict for customer project from dynamodb'''\n table = DDB.Table('gdrive-customers')\n\n try:\n response = table.get_item(\n Key={\n 'customer': message['CustomerName'],\n 'project': message['ProjectName']\n },\n ProjectionExpression='folder_ids'\n )\n except ClientError as errc:\n LOGGER.exception(errc)\n exc_info = sys.exc_info()\n raise Exception(errc).with_traceback(exc_info[2])\n\n try:\n return response['Item']['folder_ids']\n except KeyError as errk:\n LOGGER.exception(errk)\n sns_message = build_sns_message(message, {})\n message_attributes = build_message_attributes('folder_missing', 'error')\n publish_sns_message(GDRIVE_SNS_TOPIC_ARN, sns_message, message_attributes)\n raise GDriveFolderNotFoundError('Gdrive Folders missing for {} - {}'.format(message['CustomerName'], message['ProjectName']))\n\n\ndef get_docs_to_copy(message, doc_templates, stage_name, folder_ids):\n '''Returns a formatted dict of documents that need to be copied'''\n docs = {\n 'lead_in': {\n '{}_Account_Plan_Q{}_{}'.format(message['CustomerName'], FISCAL_YEAR.quarter, datetime.today().strftime('%Y')): {\n 'tag': 'AccountPlan',\n 'dest': folder_ids['AccountFolder']['RootId'],\n 'field_name': 'AccountPlanLink'\n },\n '{}_{}_Risk Log'.format(message['CustomerName'], message['ProjectName']): {\n 'tag': 'RiskLog',\n 'dest': folder_ids['SalesFolder']['ProjectId'],\n 'field_name': 'RiskLogLink'\n },\n 'Add New APN Opportunity': {\n 'tag': 'APNPortalOpp',\n 'dest': folder_ids['SalesFolder']['SubFolders']['APN Portal Admin'],\n 'field_name': 'APNPortalOppLink'\n }\n },\n \"lead_validation\": {\n \"Pre-KickOff Project Notes\": {\n \"tag\": 'KickOffNotes',\n \"dest\": folder_ids['SalesFolder']['SubFolders']['Meeting_Notes'],\n \"field_name\": \"KickOffNotesLink\"\n }\n },\n 'deal_closure': {\n '{}-{}_Weekly_Status_Report_{}'.format(message['CustomerName'], message['ProjectName'], datetime.today().strftime('%m-%d-%Y')): {\n 'tag': 'WeeklyStatusReport',\n 'dest': folder_ids['DeliveryFolder']['SubFolders']['Weekly_Action_Reports'],\n 'field_name': 'WeeklyStatusReportLink'\n },\n 'Engagement_Data': {\n 'tag': 'EngagementDataPoints',\n 'dest': folder_ids['DeliveryFolder']['SubFolders']['Engagement_Data_Reports'],\n 'field_name': 'EngagementDataPointsLink'\n }\n }\n }\n\n try:\n for (title, info) in docs[stage_name].items():\n info['id'] = doc_templates[info['tag']]\n return docs[stage_name]\n except KeyError as errk:\n LOGGER.exception(errk)\n raise GDriveBaseError(errk)\n\n\ndef copy_files_from_doclist(drive, stage_doc_list, message):\n '''Iterate over doc list and copy each file to destination folder'''\n 
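# (editor's note) templates that already exist at the destination are reused rather\n    # than duplicated; a 404 from the Drive API publishes an SNS alert before re-raising.\n    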
copied_file_links = {}\n errors = []\n for (title, info) in stage_doc_list.items():\n match = check_file_exists(drive, info['dest'], title)\n if not match:\n try:\n result = copy_file(drive, info['id'], title, info['dest'])\n copied_file_links.update({info['field_name'] : result['alternateLink']})\n except HttpError as errh:\n if errh.resp.status == 404:\n sns_message = build_sns_message(message, copied_file_links)\n message_attributes = build_message_attributes('folder_missing', 'error')\n publish_sns_message(GDRIVE_SNS_TOPIC_ARN, sns_message, message_attributes)\n raise GDriveFolderNotFoundError(errh)\n except Exception as error:\n LOGGER.exception(error)\n errors.append(error)\n else:\n file_object = drive.CreateFile({'id': match[0]['id']})\n copied_file_links.update({info['field_name'] : file_object['alternateLink']})\n if errors:\n print('Errors received: {}'.format(errors))\n\n return copied_file_links\n\n\ndef list_file_object(drive, folder_id, directory_only=False):\n '''Iterates over a folder and returns list of all child objects'''\n _q = {'q': \"'{}' in parents and trashed=false\".format(folder_id)}\n file_object_list = drive.ListFile(_q).GetList()\n op = {True: eq, False: ne}[directory_only]\n file_objects = [\n x for x in file_object_list\n if op(x['mimeType'], 'application/vnd.google-apps.folder')\n ]\n return [{'id': fld['id'], 'title': fld['title']} for fld in file_objects]\n\n\ndef check_file_exists(drive, parent_folder_id, title):\n '''Check if a folder with the given title exists within the parent folder'''\n folder_list = list_file_object(\n drive,\n parent_folder_id\n )\n match = [x for x in folder_list if x['title'] == title]\n return match\n\n\ndef format_response(message):\n ''' Format the message to be returned as the response body '''\n message = {'message': message}\n return json.dumps(message)\n\n\ndef lambda_handler(event, context):\n '''Copy files Lead In entry'''\n logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)\n response = {\"status\": 200}\n\n print('Event received: {}'.format(event))\n\n try:\n message = json.loads(event['Records'][0]['Sns']['Message'])\n pipedrive_stage = event['Records'][0]['Sns']['MessageAttributes']['stage']['Value']\n\n if pipedrive_stage == 'lead_in':\n folder_ids = message['FolderIds']\n else:\n # Retrieve folder ids from dynamodb\n folder_ids = get_folder_ids(message)\n\n # Initialize GDrive authentication\n drive = init_auth()\n\n # Based on pipedrive stage, grab the docs that need to be copied\n doc_templates = get_doc_template_ids(pipedrive_stage)\n doc_list = get_docs_to_copy(message, doc_templates, pipedrive_stage, folder_ids)\n copied_file_links = copy_files_from_doclist(drive, doc_list, message)\n\n # Publish a message to Gdrive Topic\n sns_message = build_sns_message(message, copied_file_links, folder_ids)\n message_attributes = build_message_attributes('copy_files', pipedrive_stage)\n sns_response = publish_sns_message(GDRIVE_SNS_TOPIC_ARN,\n sns_message,\n message_attributes)\n response['body'] = format_response(sns_response)\n\n except Exception as error:\n if isinstance(error, WorthRetryingException):\n raise error\n\n else:\n LOGGER.exception(error)\n response['statusCode'] = 500\n message = {\n 'error': {\n 'type': type(error).__name__,\n 'description': str(error),\n },\n }\n response['body'] = format_response(message)\n\n return 
response\n","repo_name":"greghoggard/pipedrive-automation","sub_path":"Components/gdrive/copy_files.py","file_name":"copy_files.py","file_ext":"py","file_size_in_byte":11714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23781424910","text":"from google.appengine.ext.ndb.google_imports import datastore_rpc\n\nfrom ..errors import MalformedObjectError\n\n\nclass ReadConsistency:\n    STRONG = 'STRONG' # ?\n    EVENTUAL = 'EVENTUAL' # ?\n\nclass ReadOptions:\n    @classmethod\n    def from_api(cls, consistency_type=None, transaction=None):\n        self = cls()\n\n        self.consistency_type = consistency_type\n        if self.consistency_type:\n            self.read_policy = {\n                ReadConsistency.STRONG: datastore_rpc.Configuration.STRONG_CONSISTENCY,\n                ReadConsistency.EVENTUAL: datastore_rpc.Configuration.EVENTUAL_CONSISTENCY,\n            }.get(consistency_type, None)\n\n            if self.read_policy is None:\n                raise MalformedObjectError('consistency_type {} is unknown'.format(consistency_type))\n\n        self.transaction = transaction\n\n        return self\n\n    def get_options(self):\n        if self.transaction is not None:\n            raise NotImplementedError('Read operations in a specified transaction are not supported')\n\n        return datastore_rpc.Configuration(read_policy=self.read_policy)\n","repo_name":"leenr/google-datastore-restapi-ndb","sub_path":"api_objects/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72955081104","text":"from django.db import models\n\n# Create your models here.\n\nclass adicionales(models.Model):\n    titulo=models.CharField(max_length=50)\n    contenido=models.CharField(max_length=200)\n    imagen=models.ImageField(upload_to='adicionales')\n    created=models.DateTimeField(auto_now_add=True)\n    update=models.DateTimeField(auto_now=True)\n\n    class Meta:\n        verbose_name='adicional'\n        verbose_name_plural='adicionales'\n\n    \n    def __str__(self):\n        return self.titulo","repo_name":"diaznico/DulceDespertar","sub_path":"Adicionales/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39507065377","text":"# **TASK: Notice how there is more than one page, and subsequent pages look like this http://quotes.toscrape.com/page/2/. \n# Use what you know about for loops and string concatenation to loop through all the pages and get all the unique authors on the website. \n# Keep in mind there are many ways to achieve this, also note that you will need to somehow figure out how to check that your loop is on the last page with quotes. \n# For debugging purposes, I will let you know that there are only 10 pages, so the last page is http://quotes.toscrape.com/page/10/, but try to create a loop that \n# is robust enough that it wouldn't matter to know the amount of pages beforehand, perhaps use try/except for this, it's up to you!**\n\nimport requests\nimport bs4\n\npage_on = True\nnumpage = 1\nauthors = set()\nwhile page_on:\n\n    # Check the page number\n    \n    print(numpage)\n    \n    # Connect to the website/page and convert the text into a parseable state. \n    \n    cnnt_to_web = requests.get(f\"http://quotes.toscrape.com/page/{numpage}/\")\n    soup = bs4.BeautifulSoup(cnnt_to_web.text, \"lxml\")\n    \n    # Check if the loaded page is empty. For an empty page, exit the loop. 
\n    # For a page with content, collect the authors into the set and advance the page number to continue the loop.\n    \n    if len([author for author in soup.select(\".author\")]) == 0:\n        page_on = False\n    else:\n        for author in soup.select(\".author\"):\n            authors.add(author.text)\n        numpage += 1\n    \nauthors\n","repo_name":"MrZiemni0k/Learning","sub_path":"OnHold/WebScrape_Project/WebscrapingTask.py","file_name":"WebscrapingTask.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33188142544","text":"class Solution:\n    def myAtoi(self, s: str) -> int:\n        # INT_MAX (2^31 − 1) or INT_MIN (−2^31) \n        s = s.strip()\n        ret = 0\n        \n        if not s or len(s) == 0:\n            return ret\n        \n        flag = None\n        if s[0] in '+-':\n            flag = 1 if s[0] == '+' else -1\n        \n        pos = 1 if flag else 0\n        while pos < len(s) and s[pos].isdigit():\n            ret = 10 * ret + int(s[pos])\n            pos += 1\n        \n        ret = flag * ret if flag else ret\n        ret = max(min(2**31 - 1, ret), -2**31)\n        \n        return ret\n    ","repo_name":"zach96guan/Stupid_LeetCoder","sub_path":"String/8.String_to_Integer(atoi)/String_to_Integer(atoi).py","file_name":"String_to_Integer(atoi).py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"23383926362","text":"import os\nimport yaml\nfrom common.frame.pylog import log\n\n\nclass Getdata():\n\n    def get_config_data(self, cfg_fname):\n        '''get_config_data: read the config.yaml file and return its contents as a dict'''\n        try:\n            curPath = os.path.dirname(os.path.realpath(__file__))\n            yamlPath = os.path.join(os.path.dirname(os.path.dirname(curPath)), \"config\\\\\" + cfg_fname)\n            # mzlog.log.info(\"reading config file\")\n            with open(yamlPath, 'r', encoding='utf-8') as f:\n                cfg = f.read()\n                cfg_data = yaml.safe_load(cfg)\n                return cfg_data\n        except Exception as err:\n            log.error(\"failed to read config file: {}\".format(err))\n\n\n    def get_case_data(self, fpath, fname):\n        try:\n            curPath = os.path.dirname(os.path.realpath(__file__))\n            caseFilepath = os.path.join(os.path.dirname(os.path.dirname(curPath)),\n                                        \"data\\\\http\\\\\" + fpath + \"\\\\\" + fname)\n            # mzlog.log.info(\"reading test case file\")\n            with open(caseFilepath, encoding=\"utf-8\") as f:\n                case_data = f.read()\n                return case_data\n        except Exception as err:\n            log.error(\"failed to read test case file: {}\".format(err))\n\n    def get_interface_url(self, fname):\n\n        try:\n            curPath = os.path.dirname(os.path.realpath(__file__))\n            urlFilepath = os.path.join(os.path.dirname(os.path.dirname(curPath)), \"template\\\\http\\\\\" + fname)\n            # mzlog.log.info(\"reading url file\")\n            with open(urlFilepath) as f:\n                url_data = f.read()\n                return url_data\n        except Exception as err:\n            log.error(\"failed to read url file: {}\".format(err))\n\n    def get_ip_port(self, environment_type):\n\n        '''choose the matching test ip and port for the given environment type'''\n        if environment_type == \"test\":\n            # mzlog.log.info(\"test environment\")\n            ip = self.get_config_data(\"config.yaml\")[environment_type][\"ip\"]\n            port = self.get_config_data(\"config.yaml\")[environment_type][\"port\"]\n            return (ip, port)\n        elif environment_type == \"formal\":\n            # mzlog.log.info(\"formal environment\")\n            ip = self.get_config_data(\"config.yaml\")[\"formal\"][\"ip\"]\n            port = self.get_config_data(\"config.yaml\")[\"formal\"][\"port\"]\n            return (ip, port)\n        else:\n            return\n\n\n\n\nif __name__ == '__main__':\n    getdata = Getdata()\n    data = getdata.get_config_data(\"config.yaml\")\n    
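# (editor's note, hypothetical usage) the same config dict also backs get_ip_port,\n    # e.g. ip, port = getdata.get_ip_port(\"test\")\n    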
print(data)\n","repo_name":"15016665135/miya_interfacetest","sub_path":"Http_Test_Project/common/frame/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19764407704","text":"#!/usr/bin/env python\n##############################################################################\n#\n# wxextensions by DANSE Diffraction group\n# Simon J. L. Billinge\n# (c) 2006 trustees of the Michigan State University.\n# All rights reserved.\n#\n# File coded by: Chris Farrow\n#\n# See AUTHORS.txt for a list of people who contributed.\n# See LICENSE.txt for license information.\n#\n##############################################################################\n\n\"\"\"This module contains TextValidator, which is an input validator for the\nwxTextCtrl. See the wxPython documentation for wxTextCtrl for more about text\nvalidators. Three constants are defined for use in TextValidator: ALPHA_ONLY,\nDIGIT_ONLY, and FLOAT_ONLY. See the TextValidator class for how these are used.\n\"\"\"\n\nALPHA_ONLY = 1\nDIGIT_ONLY = 2\nFLOAT_ONLY = 3\n\nimport wx\nimport string\n\nclass TextValidator(wx.Validator):\n \"\"\"This validator is designed to check text input for wxTextCtrls. (It might\n have uses in other widgets.) It can validate for letters only, digits only,\n floats only, and can allow for a negative at the beginning of a digit string\n or a negative float.\n \"\"\"\n\n def __init__(self, flag=DIGIT_ONLY, allowNeg=False):\n \"\"\"Initialize the validator.\n\n flag -- DIGIT_ONLY, allow only digits (default)\n ALPHA_ONLY, allow only letters\n FLOAT_ONLY, allow only floats\n\n allowNeg -- Allow a negative sign in front of DIGIT_ONLY, or\n FLOAT_ONLY text. (default False)\n \"\"\"\n wx.Validator.__init__(self)\n self.flag = flag\n self.allowNeg = allowNeg\n self.Bind(wx.EVT_CHAR, self.OnChar)\n\n def Clone(self):\n return TextValidator(self.flag, self.allowNeg)\n\n def Validate(self, win):\n tc = self.GetWindow()\n val = tc.GetValue()\n\n if self.flag == ALPHA_ONLY:\n return val.isalpha()\n\n elif self.flag == DIGIT_ONLY:\n if self.allowNeg:\n val1 = val[:1].lstrip('-') + val[1:]\n else:\n val1 = val\n return val1.isdigit()\n\n elif self.flag == FLOAT_ONLY:\n try:\n x = float(val)\n if x < 0 and not self.allowNeg:\n return False\n except ValueError:\n return False\n\n return True\n\n def OnChar(self, event):\n key = event.GetKeyCode()\n\n if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:\n event.Skip()\n return\n\n if self.flag == ALPHA_ONLY and chr(key) in string.ascii_letters:\n event.Skip()\n return\n\n # resolve the new value here\n win = self.GetWindow()\n val = win.GetValue()\n insertion = win.GetInsertionPoint()\n first, last = win.GetSelection()\n if first != last:\n val = val[:first] + val[last:]\n insertion = first\n newval = val[:insertion] + chr(key) + val[insertion:]\n\n if self.flag == DIGIT_ONLY:\n newval1 = newval\n if self.allowNeg:\n newval1 = newval[:1].lstrip('-') + newval[1:]\n if newval1.isdigit():\n event.Skip()\n return\n\n if self.flag == FLOAT_ONLY:\n try:\n x = float(newval+\"1\") # Catches \"1e\", a float to be\n if x >= 0 or self.allowNeg:\n event.Skip()\n return\n\n except ValueError:\n pass\n\n if not wx.Validator.IsSilent():\n wx.Bell()\n\n # Returning without calling even. 
Skip eats the event before it\n # gets to the text control\n return\n\n # These are needed so the validator can work in dialogs.\n def TransferToWindow(self):\n return True\n\n def TransferFromWindow(self):\n return True\n\n# End of class TextValidator\n","repo_name":"diffpy/diffpy.pdfgui","sub_path":"src/diffpy/pdfgui/gui/wxextensions/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"47"} +{"seq_id":"21068116055","text":"from pyttsx3 import *\r\n\r\nengine=init('sapi5')\r\nvoices=engine.getProperty('voices')\r\n#print(voices[0].id)\r\nengine.setProperty('voice',voices[0].id)\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n speak(\"..........Hello .......User \")\r\n print(\"Enter the statement :-\")\r\n s=input()\r\n speak(s)\r\n","repo_name":"Team-Neurons/hacktoberfest","sub_path":"Python/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"47"} +{"seq_id":"20122247559","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 25 11:15:50 2019\n\n@author: sshaf\nusing SVM classifier for fraud detection \n\n\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\n\nos.getcwd()\npd.options.display.max_colwidth = 5000\n\n#load data and remove unwanted variables\nmypath='..\\\\data\\\\'\ndf = pd.read_csv (mypath+'feature_selected_fraud_30000.csv')\ndf=df.drop(['Unnamed: 0'],axis=1)\n\n#chi2\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nselector=SelectKBest(chi2,20)\nnewdt=selector.fit_transform(df.drop(labels=['isFraud'], axis=1), df['isFraud'])\nmask=selector.get_support()\n\ncol_names=df.drop(labels=['isFraud'], axis=1).columns\ndf_chi2=df\nfor i in range(0,len(mask)):\n if mask[i]==False:\n df_chi2=df_chi2.drop([col_names[i]],axis='columns')\n \nX=np.array(df_chi2.drop(['isFraud'],axis=1))\ny=np.array(df_chi2['isFraud'])\n\n\nfrom sklearn import svm\nsvm_clf=svm.SVC(kernel='rbf' , degree=2) #kernel='rbf' , gamma =auto,degree=3\nfrom sklearn.model_selection import cross_validate\ncv_results = cross_validate(svm_clf, X, y, cv=5)\n\n\n\nfrom sklearn.model_selection import cross_val_score\ncv_results2 = cross_val_score(svm_clf, X, y, cv=5, scoring='f1')\nprint('f1 score mean:'+str(cv_results2.mean()))\nprint('accuracy mean:'+str(cv_results['test_score'].mean()))\nprint('fit time mean:'+str(cv_results['fit_time'].mean()))\nprint('score time mean:'+str(cv_results['score_time'].mean()))\n\nfold = [1, 2, 3,4,5]\n\nimport matplotlib.pyplot as plt\nplt.plot(fold, cv_results['fit_time'])\nplt.xlabel('fold number')\nplt.ylabel('fit time(s)')\n\n\nplt.plot(fold, cv_results['score_time'])\nplt.xlabel('fold number')\nplt.ylabel('score time (s)')\n\nplt.plot(fold, cv_results['test_score'])\nplt.xlabel('fold number')\nplt.ylabel('accuracy')\n\nplt.plot(fold, cv_results2)\nplt.xlabel('fold number')\nplt.ylabel('f1_score')\n\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2, random_state=4)\n#x_train_mod=x_train.reshape(-1,1)\n#x_test_mod=x_test.reshape(-1,1)\n#y_train_mod=y_train.reshape(-1,1)\n#y_test_mod=y_test.reshape(-1,1)\n\n\n#SVM Classifier\nfrom sklearn import svm\nmodel=svm.SVC(kernel='rbf' , degree=3) #kernel='rbf' , gamma 
=auto,degree=3\nmodel.fit(x_train,y_train)\n\nfrom sklearn.metrics import accuracy_score\n#accuracy=model.score(x_test_mod,y_test_mod)\n#prediction=model.predict(x_test_mod,y_test_mod)\n\ny_pred=model.predict(x_test)\n\nfrom sklearn import metrics\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\nprint(\"f1_score:\",metrics.f1_score(y_test, y_pred))\n","repo_name":"SimaShafaei/Automatic-Fraud-Detection","sub_path":"program/SVM_classifier.py","file_name":"SVM_classifier.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16471546278","text":"from funcionalidades.testaEntrada import procuraTabela, testaEntrada\n\n\ndef infInternacao(df):\n    while True:\n        mun = input('Enter the name of your municipality: ').upper()\n        if(testaEntrada(mun)): \n            break\n    \n    anos = internacoesAno(df, mun)\n    testaImpressao(anos, mun)\n    \n    \ndef internacoesAno(df, mun):\n    cidade = df.loc[df['municipio_residencia'] == mun, :] # filter the table by the given city\n    \n    if(procuraTabela(cidade)):\n        # filter the table by hospitalization date range and store one variable per year\n        dezoito = cidade.loc[(cidade['data_internacao'] >= '2018-01-01') & (cidade['data_internacao'] 
<= '2018-12-31')]\n        dezenove = cidade.loc[(cidade['data_internacao'] >= '2019-01-01') & (cidade['data_internacao'] <= '2019-12-31')]\n        vinte = cidade.loc[(cidade['data_internacao'] >= '2020-01-01') & (cidade['data_internacao'] <= '2020-12-31')]\n        vinteeum = cidade.loc[(cidade['data_internacao'] >= '2021-01-01') & (cidade['data_internacao'] <= '2021-12-31')]\n\n        return [dezoito, dezenove, vinte, vinteeum]\n    else:\n        return 0\n\n\n# this function decides whether to print the information or an error message\ndef testaImpressao(anos, mun):\n    if(anos): # anos == 0 means the lookup found no records for the municipality in the table\n        mostraInfos(anos, mun)\n    else: \n        print(f'\\nNo city named {mun} was found in the table.')\n    \n\ndef mostraInfos(anos, mun):\n    print(f'\\nNumber of hospitalizations in {mun} per year between 2018 and 2021:\\n')\n    print(f'Hospitalizations in 2018: {len(anos[0])}')\n    print(f'Hospitalizations in 2019: {len(anos[1])}')\n    print(f'Hospitalizations in 2020: {len(anos[2])}') # the length of each list entry is the number of hospitalizations in that year\n    print(f'Hospitalizations in 2021: {len(anos[3])}')\n    ","repo_name":"albertoborsatto/DesafioHospitaisDellITAcademy","sub_path":"funcionalidades/consultaInternacao.py","file_name":"consultaInternacao.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40989365817","text":"def solution(wallpaper):\n    answer = []\n    # declare sets to hold the x and y coordinates (sets are used so duplicate coordinates are dropped)\n    x_set, y_set = set(), set()\n    \n    # traverse the y axis\n    for x in range(len(wallpaper)):\n        \n        # traverse the x axis\n        for y in range(len(wallpaper[x])):\n            \n            # if the current cell is '#', add each coordinate to its set\n            if '#' in wallpaper[x][y]:\n                x_set.add(x)\n                y_set.add(y)\n    # the leading edge is the minimum and the trailing edge is the maximum + 1, so return them with min and max\n    return [min(x_set), min(y_set), max(x_set) + 1, max(y_set) + 1]","repo_name":"jaemoon99/Programmers","sub_path":"프로그래머스/unrated/161990. 
바탕화면 정리/바탕화면 정리.py","file_name":"바탕화면 정리.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23846942617","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.decomposition import PCA\nfrom scipy import interpolate\nfrom BaselineRemoval import BaselineRemoval\n\n\nclass vinoPCA:\n\n    def __init__(self, Data, numberOfEachSamples):\n\n        \"\"\"\n        :param Data: The data on which PCA should be done.\n        :param numberOfEachSamples: An iterable giving how many of each sample there are in Data, in order.\n        \"\"\"\n\n        self.Data = Data\n        self.numberOfEachSamples = numberOfEachSamples\n\n    def getColorMap(self):\n\n        \"\"\"\n        Creates a colormap to differentiate the samples in the transformed plot\n        :return: A colormap to visualise the different samples on the plot.\n        \"\"\"\n\n        for i in range(0, len(self.numberOfEachSamples)):\n            if i == 0:\n                colormap = np.zeros(self.numberOfEachSamples[0])\n            else:\n                colormap = np.append(colormap, np.ones(self.numberOfEachSamples[i]) *5*i)\n\n        return colormap\n\n    def removeFLuo(self, Data):\n\n        \"\"\"\n        Remove fluorescence background from the data given.\n        :param Data: The data from which you wish to remove the fluorescence background.\n        :return: A new set of data without the background.\n        \"\"\"\n\n        nm = Data[:, 1]\n        cm = 1 / (632.8e-9) - 1 / (nm * 1e-9)\n        size = np.ma.size(Data, 1)\n        polynomial_degree = 100\n        filtered_datas = np.zeros(shape=(800, size - 1))\n\n        # for column in range(2, size):\n        #     y = Data[:, column]\n        #     d = 25\n        #     f2 = interpolate.interp1d(cm[199:][::d], y[199:][::d], kind='quadratic')\n        #     y = y[200:1000] - f2(cm[200:1000])\n        #     y = (y - min(y)) / max(y - min(y))\n        #     filt_datas[:, column - 1] = y\n        #     filt_datas[:, 0] = cm[200:1000]\n\n        for column in range(2, size):\n            spectre = Data[200:1000, column]\n            baseObj = BaselineRemoval(spectre)\n            values = baseObj.IModPoly(polynomial_degree)\n            # values = values - min(values) # if you normalise, you lose the shared components (alcohol in particular)\n            # values = values/max(values) # you also lose the degree of presence (more or less corked?)\n            # Without normalising, you favour the shared components present at\n            # different degrees (more or less alcohol). So test with and without?\n            filtered_datas[:, column - 1] = values\n\n        filtered_datas[:, 0] = Data[200:1000, 1]\n\n        return filtered_datas\n\n    def doPCA(self, n:int):\n\n        \"\"\"\n        Apply PCA to the given data, reducing it to n eigenvector components\n        :param n: number of components to get from the PCA\n        :return: Returns nothing. 
Just creates an array of the transformed data in the new vector space\n        \"\"\"\n\n        new_Datas = self.removeFLuo(self.Data)\n        new_Datas = np.transpose(new_Datas)\n        self.X_PCA = PCA(n_components=n)\n        self.X_reduced = self.X_PCA.fit_transform(new_Datas[1:, :])\n\n    def showTransformedData3D(self):\n\n        \"\"\"\n        Plots the data transformed in the new vector space with the first three eigenvectors\n        :return: None\n        \"\"\"\n\n        plt.clf()\n        fig = plt.figure(1, figsize=(8, 6))\n        ax = Axes3D(fig, elev=-150, azim=110)\n        ax.scatter(\n            self.X_reduced[:700, 0],\n            self.X_reduced[:700, 1],\n            self.X_reduced[:700, 2],\n            c=self.getColorMap(),\n            cmap='nipy_spectral',\n            s=10)\n        ax.set_title(\"First three PCA directions\")\n        ax.set_xlabel(\"1st eigenvector\")\n        ax.w_xaxis.set_ticklabels([])\n        ax.set_ylabel(\"2nd eigenvector\")\n        ax.w_yaxis.set_ticklabels([])\n        ax.set_zlabel(\"3rd eigenvector\")\n        ax.w_zaxis.set_ticklabels([])\n        plt.show()\n\n    def showTransformedData2D(self):\n\n        \"\"\"\n        Plots the data transformed in the new vector space with the first two eigenvectors\n        :return: None\n        \"\"\"\n\n        plt.clf()\n        plt.figure(2)\n        plt.scatter(self.X_reduced[:700, 0], self.X_reduced[:700, 1], c=self.getColorMap(), cmap='nipy_spectral', s=10)\n        plt.title('First two PCA directions')\n        plt.xlabel('1st eigenvector')\n        plt.ylabel('2nd eigenvector')\n        plt.show()\n\n    def showTransformData1D(self):\n\n        \"\"\"\n        :return: Plots the data transformed in the new vector space along the first eigenvector\n        \"\"\"\n        pass\n\n    def getAllEigenvectors(self):\n\n        \"\"\"\n        Function to get all of the eigenvectors created\n        :return: an array of n eigenvectors\n        \"\"\"\n\n        return self.X_PCA.components_.transpose()\n\n    def showEigenvectors(self):\n\n        \"\"\"\n        Function to visualise the eigenvectors\n        :return: None\n        \"\"\"\n        plt.figure(3)\n        plt.title('1st eigenvector')\n        plt.plot(self.X_PCA.components_.transpose()[:, 0])\n        plt.figure(4)\n        plt.title('2nd eigenvector')\n        plt.plot(self.X_PCA.components_.transpose()[:, 1])\n        plt.figure(5)\n        plt.title('3rd eigenvector')\n        plt.plot(self.X_PCA.components_.transpose()[:, 2])\n        plt.show()\n\n    def getTransformedDatas(self):\n\n        \"\"\"\n        Gives the transformed data as an array.\n        :return: transformed data\n        \"\"\"\n\n        return self.X_reduced\n\n    def getScreeValues(self):\n\n        \"\"\"\n        Gives the percentage of representation for each new eigenvector\n        :return: array of the scree values, from most important to least\n        \"\"\"\n\n        return self.X_PCA.explained_variance_ratio_\n\n    def plotScreeValues(self):\n\n        \"\"\"\n        Create a scree plot with the eigenvectors\n        :return: None\n        \"\"\"\n\n        pass\n\n\nif __name__ == \"__main__\":\n\n    iterable = [31, 30, 30, 30, 80, 31, 33, 31, 30, 30, 30, 30, 30, 30, 30, 30, 104, 30, 30] # without white wine because it wrecks the aspect ratio\n    Data = np.genfromtxt('/Users/Shooshoo/PycharmProjects/PCA_DCCLab/DataVino_Sorted.csv', delimiter=',')\n\n    my_Spectrums = vinoPCA(Data, iterable)\n    my_Spectrums.doPCA(10)\n    my_Spectrums.showTransformedData3D()\n    my_Spectrums.showTransformedData2D()\n    my_Spectrums.showEigenvectors()\n","repo_name":"shooshoo1997/PyVino","sub_path":"PyVino.py","file_name":"PyVino.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"69840199824","text":"from typing import Optional, List, Type, TYPE_CHECKING\n\nimport protocol0.domain.lom.instrument.instrument as instrument_package\nfrom protocol0.domain.lom.device.Device import Device\nfrom 
protocol0.domain.lom.device.DrumRackDevice import DrumRackDevice\nfrom protocol0.domain.lom.device.PluginDevice import PluginDevice\nfrom protocol0.domain.lom.device.RackDevice import RackDevice\nfrom protocol0.domain.lom.device.SimplerDevice import SimplerDevice\nfrom protocol0.domain.lom.instrument.InstrumentInterface import InstrumentInterface\nfrom protocol0.domain.shared.utils.list import find_if\nfrom protocol0.domain.shared.utils.utils import import_package\nfrom protocol0.shared.logging.Logger import Logger\n\nif TYPE_CHECKING:\n from protocol0.domain.lom.track.simple_track.SimpleTrack import SimpleTrack\n\n\nclass InstrumentFactory(object):\n _INSTRUMENT_CLASSES: List[Type[InstrumentInterface]] = []\n\n @classmethod\n def make_instrument(cls, track: \"SimpleTrack\") -> Optional[InstrumentInterface]:\n \"\"\"\n If the instrument didn't change we keep the same instrument and don't instantiate a new one\n to keep instrument state\n \"\"\"\n\n instrument_device = find_if(\n lambda d: d.is_instrument and not type(d) is RackDevice, track.devices.all\n ) # taking the 1st instrument found\n if instrument_device is None:\n return None\n\n instrument_class = cls._get_instrument_class(instrument_device)\n if instrument_class is None:\n return None\n\n if (\n instrument_class\n and isinstance(track.instrument, instrument_class)\n and track.instrument.device == instrument_device\n ):\n return track.instrument # maintaining state\n else:\n rack_device = track.devices.get_device_or_rack_device(instrument_device)\n if rack_device:\n rack_device.register_observer(track)\n\n return instrument_class(instrument_device, rack_device)\n\n @classmethod\n def _get_instrument_class(cls, device: Device) -> Optional[Type[InstrumentInterface]]:\n # checking for grouped devices\n if isinstance(device, DrumRackDevice):\n from protocol0.domain.lom.instrument.instrument.InstrumentDrumRack import (\n InstrumentDrumRack,\n )\n\n return InstrumentDrumRack\n elif isinstance(device, PluginDevice):\n if not device.enum:\n Logger.warning(f\"plugin device not detected : {device}\")\n return None\n\n for _class in cls._get_instrument_classes():\n if _class.DEVICE == device.enum:\n return _class\n elif isinstance(device, SimplerDevice):\n from protocol0.domain.lom.instrument.instrument.InstrumentSimpler import (\n InstrumentSimpler,\n )\n\n return InstrumentSimpler\n elif device._device.class_display_name == \"Sampler\":\n from protocol0.domain.lom.instrument.instrument.InstrumentSampler import (\n InstrumentSampler,\n )\n\n return InstrumentSampler\n\n return None\n\n @classmethod\n def _get_instrument_classes(cls) -> List[Type[InstrumentInterface]]:\n if not cls._INSTRUMENT_CLASSES:\n import_package(instrument_package)\n cls._INSTRUMENT_CLASSES = InstrumentInterface.__subclasses__()\n\n return cls._INSTRUMENT_CLASSES\n","repo_name":"lebrunthibault/protocol0","sub_path":"p0_script/protocol0/domain/lom/instrument/InstrumentFactory.py","file_name":"InstrumentFactory.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"21343940586","text":"import os\nimport logging\nfrom time import strftime, time\nimport scraper_constants\n\n\nclass ScriptFileDateFixer():\n def __init__(self, script_dir, date_fix_file):\n self.script_dir = script_dir\n self.date_fix_file = date_fix_file\n self.log_file = 'scriptfiledatefixer_{time}.log'.format(time=strftime('%Y-%m-%d %H-%M'))\n\n logging.basicConfig(filename=self.log_file, 
format='%(levelname)s: %(message)s', level=logging.DEBUG)\n\n def fix_dates(self):\n logging.info('Starting script file date fix process:\\n\\tScript directory:{script_dir}\\n\\tFix file:{fix_file}'\n .format(\n script_dir=self.script_dir,\n fix_file=self.date_fix_file\n )\n )\n start_time = time()\n\n try:\n if os.path.exists(self.script_dir) and os.path.isfile(self.date_fix_file):\n with open(self.date_fix_file, 'r') as fix_file:\n for line in fix_file:\n if not line:\n continue\n \n script_attributes = line.split('\\t')\n script_title = script_attributes[0]\n script_date = script_attributes[1]\n self.update_script_dir(script_title, script_date)\n else:\n raise ValueError('Script directory path or date file does not exist')\n except Exception as e:\n logging.error('An error occurred: ' + str(e))\n \n total_time = time() - start_time\n logging.info('Total time: ' + str(total_time))\n\n def update_script_dir(self, script_title, script_date):\n clean_title = scraper_constants.clean_script_title(script_title)\n script_letter = script_title[0]\n if script_letter.isalpha():\n search_dir = '/'.join([self.script_dir, script_letter])\n else:\n search_dir = '/'.join([self.script_dir, '0'])\n \n if os.path.exists(search_dir):\n sub_dirs = os.listdir(search_dir)\n script_matches = [sub_dir for sub_dir in sub_dirs if clean_title == sub_dir[:sub_dir.rfind('_')]]\n if script_matches:\n for script_match in script_matches:\n dir_to_update = '/'.join([search_dir, script_match])\n os.rename(dir_to_update, dir_to_update.replace(scraper_constants.DATE_TOKEN, script_date))\n else:\n logging.error('No match found for ' + clean_title)\n else:\n raise ValueError('Search directory path not found: ' + search_dir)\n","repo_name":"allenbkim/nlc-script-database","sub_path":"scraper/script_file_date_fixer.py","file_name":"script_file_date_fixer.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17092901373","text":"from SCRIBES.SignalConnectionManager import SignalManager\n\nclass Manager(SignalManager):\n\n\tdef __init__(self, editor):\n\t\tSignalManager.__init__(self)\n\t\tself.__init_attributes(editor)\n\t\tself.connect(editor, \"fullscreen\", self.__fullscreen_cb, True)\n\t\tself.connect(editor, \"quit\", self.__quit_cb)\n\n\tdef __init_attributes(self, editor):\n\t\tself.__editor = editor\n\t\tself.__view = editor.textview\n\t\tself.__justification = self.__view.get_justification()\n\t\tself.__lmargin = self.__view.get_left_margin()\n\t\tself.__rmargin = self.__view.get_right_margin()\n\t\treturn False\n\n\tdef __destroy(self):\n\t\tself.disconnect()\n\t\tdel self\n\t\treturn False\n\n\tdef __update(self, fullscreen):\n\t\tself.__editor.freeze()\n\t\tself.__view.set_property(\"show-right-margin\", False if fullscreen else self.__margin())\n\t\tself.__view.set_property(\"show-line-numbers\", False if fullscreen else True)\n\t\tself.__view.set_property(\"highlight-current-line\", False if fullscreen else True)\n\t\tself.__view.set_left_margin(self.__adjust_margin() if fullscreen else self.__lmargin)\n\t\tself.__view.set_right_margin(self.__adjust_margin() if fullscreen else self.__rmargin)\n\t\tfrom gobject import idle_add\n\t\tidle_add(self.__move_view_to_cursor)\n\t\treturn False\n\n\tdef __move_view_to_cursor(self):\n\t\tself.__editor.move_view_to_cursor(True)\n\t\tself.__editor.thaw()\n\t\treturn False\n\n\tdef __adjust_margin(self):\n\t\twidth = self.__view.get_visible_rect()[2]\n\t\treturn int(0.25 * 
width)\n\n\tdef __margin(self):\n\t\tlanguage = self.__editor.language\n\t\tlanguage = language if language else \"plain text\"\n\t\tfrom SCRIBES.DisplayRightMarginMetadata import get_value as show_margin\n\t\treturn show_margin(language)\n\n\tdef __fullscreen_cb(self, editor, fullscreen):\n\t\tfrom gobject import idle_add\n\t\tidle_add(self.__update, fullscreen, priority=9999)\n\t\treturn False\n\n\tdef __quit_cb(self, *args):\n\t\tself.__destroy()\n\t\treturn False\n","repo_name":"mystilleef/scribes","sub_path":"SCRIBES/GUI/MainGUI/View/FullscreenManager.py","file_name":"FullscreenManager.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41423507526","text":"# -*- coding: EUC-KR -*-\nimport sys\n# print(\"Python\", \"Java\", \"Javascript\", sep=\",\", end=\"?\\n\")\n# print(\"Which one would be more fun?\")\n# lang = input()\n\n# if (lang == \"Javascript\"):\n#     print(\"the JavaScript language\")\n# elif (lang == \"Java\"):\n#     print(\"Java is an object-oriented language\")\n# elif (lang == \"Python\"):\n#     print(\"Python programming\")\n# else:\n#     print(\"invalid input\")\n\n# print(\"Python\", \"Java\", file=sys.stdout)\n# print(\"Python\", \"Java\", file=sys.stderr)\n\nscores = {\"math\":0, \"english\":50, \"coding\":100}\nfor subject, score in scores.items():\n    # print(subject, score)\n    print(subject.ljust(8), str(score).rjust(4), sep=\":\")\n\n# bank queue number tickets\n# 001, 002, 003, ...\nfor num in range(1, 21):\n    print(\"Waiting number : \" + str(num).zfill(3))\n\nanswer = input(\"Enter any value : \")\nprint(type(answer))\n\n\n","repo_name":"B-JayU/Python_study","sub_path":"Python basic/ch07. 입출력/stdinout.py","file_name":"stdinout.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25146035078","text":"\"\"\"\nGiven a binary tree, return all values given a certain height h.\n\nHere's a starting point:\n\nclass Node():\n    def __init__(self, value, left=None, right=None):\n        self.value = value\n        self.left = left\n        self.right = right\n\ndef valuesAtHeight(root, height):\n    # Fill this in.\n\n# 1\n# / \\\n# 2 3\n# / \\ \\\n# 4 5 7\n\na = Node(1)\na.left = Node(2)\na.right = Node(3)\na.left.left = Node(4)\na.left.right = Node(5)\na.right.right = Node(7)\nprint valuesAtHeight(a, 3)\n# [4, 5, 7]\n\n\"\"\"\nclass Node():\n    def __init__(self, value, left=None, right=None):\n        self.value = value\n        self.left = left\n        self.right = right\n\ndef valuesAtHeight(root, height):\n    # Fill this in.\n    result = []\n    \n    # recursive function inside the function\n    def nodesHeight(root, k, height):\n\n        # when root is empty, will break out of the function\n        if root is None: \n            return \n        \n        # once the current height reaches the target height, add into results\n        if k == height: \n            result.append(root.value)\n        \n        # recurse to get the left and right sides of the tree\n        nodesHeight(root.left, k+1, height) \n        nodesHeight(root.right, k+1, height) \n    \n    # start current height as 1\n    nodesHeight(root, 1, height)\n    return result\n\n# 1\n# / \\\n# 2 3\n# / \\ \\\n# 4 5 7\n\na = Node(1)\na.left = Node(2)\na.right = Node(3)\na.left.left = Node(4)\na.left.right = Node(5)\na.right.right = Node(7)\nprint(valuesAtHeight(a, 3))\n# [4, 5, 7]\n","repo_name":"weixiangtoh/daily_interview","sub_path":"GetValuesAtCertainHeightBinaryTree.py","file_name":"GetValuesAtCertainHeightBinaryTree.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
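Editor's note: the record above answers the height query with recursion. For comparison (not part of the dataset record, and the function name below is hypothetical), the same result can be computed iteratively with a level-order walk that stops at the target depth, reusing the record's Node class:

from collections import deque

def values_at_height_bfs(root, height):
    # breadth-first walk: `level` always holds exactly one tree level
    if root is None:
        return []
    level, depth = deque([root]), 1
    while level and depth < height:
        # replace the current level with all of its children
        level = deque(child for node in level for child in (node.left, node.right) if child)
        depth += 1
    # if the tree is shallower than `height`, `level` drains and we return []
    return [node.value for node in level] if depth == height else []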
+{"seq_id":"1142783033","text":"from samples import *\r\nimport json\r\n\r\nclass dividas(Conexao):\r\n def __init__(self):\r\n Conexao.__init__(self)\r\n\r\n def db_insert(self, idIntegracao, id_cloud, idPessoa, idReceitaDiversaLancto, idEconomico, idContribMelhoriaImovel, idCreditoTributario, idSimulacao, idGuia, idImovel, \r\n dataVencimento, dataInscricao, dataLancamento, livro, folha, inscricao, posicao, processoInscricao, situacaoDivida, valorInscrito,\r\n valorCorrecao, valorJuro, valorMulta, guiaComplementar, parcela, anoLivro, ano, idMotivoEstorno, dataEstorno, processoEstorno, usuarioEstorno, idContribuicaoMelhoria, \r\n das, daf, codDeclaracaoSimples, valorSaldo, simplesNacional, idNotaAvulsa, idIndexador, idReceitasDiversas, idTransferenciaImoveis, idObras, idDivida, penhora, possuiCdaEmitida,\r\n anoCda, nroCda):\r\n try: \r\n sql = \"\"\"\r\n INSERT INTO dividas ( \r\n idIntegracao, \r\n id_cloud, \r\n idPessoa,\r\n idReceitaDiversaLancto, \r\n idEconomico, \r\n idContribMelhoriaImovel,\r\n idCreditoTributario,\r\n idGuia,\r\n idImovel,\r\n idSimulacao,\r\n dataVencimento, \r\n dataInscricao,\r\n dataLancamento,\r\n livro,\r\n folha,\r\n inscricao,\r\n posicao,\r\n processoInscricao,\r\n situacaoDivida,\r\n valorInscrito,\r\n valorCorrecao,\r\n valorJuro,\r\n valorMulta, \r\n guiaComplementar,\r\n parcela, \r\n anoLivro,\r\n ano,\r\n idMotivoEstorno, \r\n dataEstorno, \r\n processoEstorno,\r\n usuarioEstorno,\r\n idContribuicaoMelhoria,\r\n das,\r\n daf,\r\n codDeclaracaoSimples,\r\n valorSaldo,\r\n simplesNacional,\r\n idNotaAvulsa, \r\n idIndexador, \r\n idReceitasDiversas, \r\n idTransferenciaImoveis, \r\n idObras, \r\n idDivida, \r\n penhora, \r\n possuiCdaEmitida, \r\n anoCda,\r\n nroCda\r\n ) VALUES (\r\n %(idIntegracao)s, \r\n %(id_cloud)s,\r\n %(idPessoa)s,\r\n %(idReceitaDiversaLancto)s,\r\n %(idEconomico)s,\r\n %(idContribMelhoriaImovel)s,\r\n %(idCreditoTributario)s,\r\n %(idGuia)s,\r\n %(idImovel)s,\r\n %(idSimulacao)s,\r\n %(dataVencimento)s, \r\n %(dataInscricao)s,\r\n %(dataLancamento)s,\r\n %(livro)s,\r\n %(folha)s,\r\n %(inscricao)s,\r\n %(posicao)s,\r\n %(processoInscricao)s,\r\n %(valorCorrecao)s,\r\n %(valorInscrito)s, \r\n %(situacaoDivida)s,\r\n %(valorCorrecao)s, \r\n %(valorJuro)s,\r\n %(valorMulta)s,\r\n %(guiaComplementar)s,\r\n %(parcela)s,\r\n %(anoLivro)s,\r\n %(ano)s,\r\n %(idMotivoEstorno)s,\r\n %(dataEstorno)s,\r\n %(processoEstorno)s,\r\n %(usuarioEstorno)s, \r\n %(idContribuicaoMelhoria)s,\r\n %(das)s,\r\n %(daf)s,\r\n %(codDeclaracaoSimples)s,\r\n %(valorSaldo)s,\r\n %(simplesNacional)s,\r\n %(idNotaAvulsa)s,\r\n %(idIndexador)s,\r\n %(idReceitasDiversas)s,\r\n %(idTransferenciaImoveis)s, \r\n %(idObras)s,\r\n %(idDivida)s,\r\n %(penhora)s,\r\n %(possuiCdaEmitida)s,\r\n %(anoCda)s,\r\n %(nroCda)s\r\n )\r\n \"\"\"\r\n data = dict (\r\n idIntegracao = idIntegracao,\r\n id_cloud = id_cloud, \r\n idPessoa = idPessoa,\r\n idReceitaDiversaLancto = idReceitaDiversaLancto,\r\n idEconomico = idEconomico, \r\n idContribMelhoriaImovel = idContribMelhoriaImovel,\r\n idCreditoTributario = idCreditoTributario,\r\n idGuia = idGuia, \r\n idImovel = idImovel,\r\n idSimulacao = idSimulacao,\r\n dataVencimento = dataVencimento, \r\n dataInscricao = dataInscricao,\r\n dataLancamento = dataLancamento, \r\n livro = livro,\r\n folha = folha,\r\n inscricao = inscricao, \r\n posicao = posicao,\r\n processoInscricao = processoInscricao,\r\n valorCorrecao = valorCorrecao,\r\n valorInscrito = valorInscrito,\r\n situacaoDivida = situacaoDivida,\r\n valorJuro = 
valorJuro,\r\n valorMulta = valorMulta, \r\n guiaComplementar = guiaComplementar, \r\n parcela = parcela, \r\n anoLivro = anoLivro, \r\n ano = ano, \r\n idMotivoEstorno = idMotivoEstorno, \r\n dataEstorno = dataEstorno, \r\n processoEstorno = processoEstorno, \r\n usuarioEstorno = usuarioEstorno, \r\n idContribuicaoMelhoria = idContribuicaoMelhoria, \r\n das = das, \r\n daf = daf, \r\n codDeclaracaoSimples = codDeclaracaoSimples, \r\n valorSaldo = valorSaldo, \r\n simplesNacional = simplesNacional, \r\n idNotaAvulsa = idNotaAvulsa, \r\n idIndexador = idIndexador, \r\n idReceitasDiversas = idReceitasDiversas, \r\n idTransferenciaImoveis = idTransferenciaImoveis, \r\n idObras = idObras, \r\n idDivida = idDivida, \r\n penhora = penhora, \r\n possuiCdaEmitida = possuiCdaEmitida, \r\n anoCda = anoCda, \r\n nroCda = nroCda\r\n )\r\n self.execute(sql, data)\r\n self.commit()\r\n send_log_info(f\"Agrupamentos {dividas} (id_cloud: {id_cloud}) inserido com sucesso.\")\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao inserir o anistias {dividas}. {contribuintesr}\")\r\n\r\n def db_delete(self):\r\n try:\r\n sql_s = f\"SELECT * FROM dividas\"\r\n if not self.query(sql_s):\r\n send_log_warning(f\"dividas não encontrado para excluir.\")\r\n return\r\n sql_d = f\"DELETE FROM dividas WHERE id is not null\"\r\n self.execute(sql_d)\r\n self.commit()\r\n send_log_info(f\"anistias excluídos com sucesso.\")\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao executar a operação de exclusão do atividades econômicas. {contribuintesr}\")\r\n\r\n def db_update(self, id, id_cloud, json, mensagem):\r\n try:\r\n sql_s = f\"SELECT * FROM dividas WHERE id = {id}\"\r\n if not self.query(sql_s):\r\n send_log_warning(f\"atividades Economicas {id} não encontrado para atualizar.\")\r\n return\r\n sql = \"\"\"\r\n UPDATE \r\n dividas \r\n SET \r\n id_cloud = %(id_cloud)s,\r\n json_post = %(json)s,\r\n resposta_post = %(mensagem)s\r\n WHERE\r\n id = %(id)s\r\n \"\"\"\r\n data = dict (\r\n id = id,\r\n id_cloud = id_cloud,\r\n json = json,\r\n mensagem = mensagem\r\n )\r\n self.execute(sql, data)\r\n self.commit()\r\n send_log_info(f\"atividades Economicas {id} atualizado com sucesso.\")\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao executar a operação de atualização da atividades Economicas. {contribuintesr}\")\r\n\r\n def db_search(self, id):\r\n try:\r\n sql = f\"SELECT * FROM dividas WHERE id = {id}\"\r\n data = self.query(sql)\r\n if data:\r\n return data\r\n send_log_info(f\"atividades Economicas {id} não encontrado.\")\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao executar a operação de busca. {contribuintesr}\")\r\n\r\n def db_list(self):\r\n try:\r\n sql = \"SELECT * FROM dividas WHERE id_cloud is null\"\r\n data = self.query(sql)\r\n if data:\r\n send_log_info(\"Consulta de todos os atividades Economicas realizada com sucesso.\")\r\n return data\r\n return None\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao executar a operação de busca. {contribuintesr}\")\r\n\r\n def get_id_cloud(self, id):\r\n if (id == None):\r\n return None\r\n try:\r\n sql = f\"SELECT id_cloud FROM dividas WHERE id_origem = {id}\"\r\n data = self.query(sql)\r\n if data:\r\n return data[0][0]\r\n send_log_info(f\"atosFontesDivulgacoes {id} não encontrado.\")\r\n except Exception as contribuintesr:\r\n send_log_error(f\"contribuintes ao executar a operação de busca. 
{contribuintesr}\")\r\n\r\n def send_post(self, id, idPessoa, idReceitaDiversaLancto, idEconomico, idContribMelhoriaImovel, idCreditoTributario, idSimulacao, idGuia, idImovel, \r\n dataVencimento, dataInscricao, dataLancamento, livro, folha, inscricao, posicao, processoInscricao, situacaoDivida, valorInscrito,\r\n valorCorrecao, valorJuro, valorMulta, guiaComplementar, parcela, anoLivro, ano, idMotivoEstorno, dataEstorno, processoEstorno, usuarioEstorno, idContribuicaoMelhoria, \r\n das, daf, codDeclaracaoSimples, valorSaldo, simplesNacional, idNotaAvulsa, idIndexador, idReceitasDiversas, idTransferenciaImoveis, idObras, idDivida, penhora,\r\n possuiCdaEmitida, anoCda, nroCda):\r\n objeto = {\r\n \"idIntegracao\": f\"Atos{id}\",\r\n \"content\": {}\r\n }\r\n if idPessoa:\r\n objeto[\"content\"][\"VctoFeriado\"] = { \"id\": int(idPessoa)}\r\n \r\n if idEconomico:\r\n objeto[\"content\"][\"idEconomico\"] = { \"id\": int(idEconomico)}\r\n \r\n if idReceitaDiversaLancto:\r\n objeto[\"content\"][\"idReceitaDiversaLancto\"] = { \"id\": int(idReceitaDiversaLancto)}\r\n \r\n if idContribMelhoriaImovel:\r\n objeto[\"content\"][\"idContribMelhoriaImovel\"] = { \"id\": int(idContribMelhoriaImovel)}\r\n \r\n if posicao:\r\n objeto[\"content\"][\"posicao\"] = f\"{posicao}\"\r\n \r\n if processoInscricao:\r\n objeto[\"content\"][\"processoInscricao\"] = f\"{processoInscricao}\"\r\n \r\n if idCreditoTributario:\r\n objeto[\"content\"][\"idCreditoTributario\"] = { \"id\": int(idCreditoTributario)}\r\n \r\n if idGuia:\r\n objeto[\"content\"][\"idGuia\"] = { \"id\": int(idGuia)}\r\n \r\n if situacaoDivida:\r\n objeto[\"content\"][\"situacaoDivida\"] = f\"{situacaoDivida}\" \r\n\r\n if valorJuro:\r\n objeto[\"content\"][\"valorJuro\"] = { \"id\": int(valorJuro) }\r\n \r\n if inscricao:\r\n objeto[\"content\"][\"inscricao\"] = f\"{inscricao}\" \r\n\r\n if valorInscrito:\r\n objeto[\"content\"][\"valorInscrito\"] = f\"{valorInscrito}\"\r\n\r\n if valorCorrecao:\r\n objeto[\"content\"][\"valorCorrecao\"] = f\"{valorCorrecao}\"\r\n\r\n if idSimulacao:\r\n objeto[\"content\"][\"idSimulacao\"] = { \"id\": int(idSimulacao)} \r\n\r\n if idImovel:\r\n objeto[\"content\"][\"idImovel\"] = { \"id\": int(idImovel)} \r\n\r\n if dataVencimento:\r\n objeto[\"content\"][\"dataVencimento\"] = f\"{dataVencimento}\"\r\n\r\n if dataInscricao:\r\n objeto[\"content\"][\"dataInscricao\"] = f\"{dataInscricao}\"\r\n\r\n if dataLancamento:\r\n objeto[\"content\"][\"dataLancamento\"] = f\"{dataLancamento}\"\r\n\r\n if valorMulta:\r\n objeto[\"content\"][\"valorMulta\"] = f\"{valorMulta}\" \r\n\r\n if guiaComplementar:\r\n objeto[\"content\"][\"guiaComplementar\"] = f\"{guiaComplementar}\" \r\n\r\n if parcela:\r\n objeto[\"content\"][\"parcela\"] = f\"{parcela}\"\r\n\r\n if anoLivro:\r\n objeto[\"content\"][\"anoLivro\"] = f\"{anoLivro}\"\r\n\r\n if ano:\r\n objeto[\"content\"][\"ano\"] = f\"{ano}\" \r\n\r\n if idMotivoEstorno:\r\n objeto[\"content\"][\"idMotivoEstorno\"] = { \"id\": int(idMotivoEstorno)}\r\n\r\n if dataEstorno:\r\n objeto[\"content\"][\"dataEstorno\"] = f\"{dataEstorno}\"\r\n\r\n if processoEstorno:\r\n objeto[\"content\"][\"processoEstorno\"] = f\"{processoEstorno}\"\r\n\r\n if usuarioEstorno:\r\n objeto[\"content\"][\"usuarioEstorno\"] = f\"{usuarioEstorno}\" \r\n \r\n if idContribuicaoMelhoria:\r\n objeto[\"content\"][\"idContribuicaoMelhoria\"] = { \"id\": int(idContribuicaoMelhoria)}\r\n \r\n if das:\r\n objeto[\"content\"][\"das\"] = f\"{das}\"\r\n\r\n if daf:\r\n objeto[\"content\"][\"daf\"] = f\"{daf}\" 
\r\n\r\n if codDeclaracaoSimples:\r\n objeto[\"content\"][\"codDeclaracaoSimples\"] = f\"{codDeclaracaoSimples}\"\r\n\r\n if valorSaldo:\r\n objeto[\"content\"][\"valorSaldo\"] = f\"{valorSaldo}\"\r\n\r\n if simplesNacional:\r\n objeto[\"content\"][\"simplesNacional\"] = f\"{simplesNacional}\" \r\n\r\n if idNotaAvulsa:\r\n objeto[\"content\"][\"idNotaAvulsa\"] = { \"id\": int(idNotaAvulsa)}\r\n\r\n if idIndexador:\r\n objeto[\"content\"][\"idIndexador\"] = { \"id\": int(idIndexador)}\r\n\r\n if idReceitasDiversas:\r\n objeto[\"content\"][\"idReceitasDiversas\"] = { \"id\": int(idReceitasDiversas)}\r\n\r\n if idTransferenciaImoveis:\r\n objeto[\"content\"][\"idTransferenciaImoveis\"] = { \"id\": int(idTransferenciaImoveis)} \r\n \r\n if idObras:\r\n objeto[\"content\"][\"idObras\"] = { \"id\": int(idObras)}\r\n \r\n if idDivida:\r\n objeto[\"content\"][\"guiaComplementar0\"] = { \"id\": int(idDivida)} \r\n \r\n if penhora:\r\n objeto[\"content\"][\"penhora\"] = f\"{penhora}\"\r\n \r\n if possuiCdaEmitida:\r\n objeto[\"content\"][\"possuiCdaEmitida\"] = f\"{possuiCdaEmitida}\" \r\n \r\n if anoCda:\r\n objeto[\"content\"][\"anoCda\"] = f\"{anoCda}\",\r\n \r\n if nroCda:\r\n objeto[\"content\"][\"nroCda\"] = f\"{nroCda}\"\r\n \r\n if folha != None:\r\n objeto[0][\"calculotributario\"][\"creditotributario\"] = f\"{folha}\" \r\n \r\n if livro:\r\n objeto[\"content\"][\"livro\"] = f\"{livro}\" \r\n\r\n envio = api_post(\"dividas\", objeto)\r\n\r\n if (envio[\"code\"] == 200 or envio[\"code\"] == 201):\r\n self.db_update(id, envio[\"mensagem\"], json.dumps(objeto, ensure_ascii=False), None)\r\n else:\r\n self.db_update(id, None, json.dumps(objeto), json.dumps(envio[\"mensagem\"], ensure_ascii=False))\r\n\r\ndividas = dividas()","repo_name":"MarcosRBasso/TributosExes","sub_path":"records/dividas.py","file_name":"dividas.py","file_ext":"py","file_size_in_byte":16320,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1371492213","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"RDMC\",\n version=\"0.1.0\",\n author=\"Xiaorui Dong, Lagnajit Pattanaik, Shih-Cheng Li, Kevin Spiekermann, Hao-Wei Pang, and William H. 
Green\",\n author_email=\"xiaorui@mit.com\",\n description=\"A light-weight software package with expertise in handling Reaction Data and Molecular (including transitions states) Conformers.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/xiaoruiDong/RDMC\",\n packages=find_packages(),\n install_requires=['numpy',\n 'scipy',\n 'pandas',\n 'rdkit>=2021.03.1',\n 'openbabel-wheel>=3.1.1',\n 'networkx',\n 'py3Dmol',\n 'ase',\n 'matplotlib',\n 'cclib',\n 'ipywidgets', # view molecules (not required to specify when using conda/mamba)\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Chemistry\"\n ],\n keywords=\"chemistry, RDKit, molecule, conformer, reaction, cheminformatics\",\n license=\"MIT License\",\n python_requires='>=3.6',\n platforms=[\"Any.\"],\n)\n","repo_name":"xiaoruiDong/RDMC","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"47"} +{"seq_id":"15433268293","text":"import sys\nsys.stdout = open('fctrl/output.txt', 'w')\nsys.stdin = open('fctrl/input.txt', 'r')\n \ndef fact(x):\n count = 0\n div = 5\n \n while (x / div >= 1):\n \n count += int(x / div)\n div *= 5\n return count\n\n\nt = int(input())\n\nfor x in range(t):\n number = int(input())\n print(fact(number))\n \n\n\n\n \n \n \n\n\n\n\n","repo_name":"praveen-x/nighttowl-Codechef","sub_path":"fctrl/fctrl.py","file_name":"fctrl.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11389493433","text":"\"\"\"pytorch trainer.\n\n1. 测试时,model.eval()一定要调用,不然如果model中有BatchNorm及Dropout等,会按训练的情况(如Dropout会生效)\n2. 确保metric计算正常\n3. 注意lr的设置\n4. 
多观察训练集与验证集的指标,以确定是过拟合还是欠拟合\n\n@author: huangwm\n\"\"\"\nimport time\nimport random\nimport logging\nimport numpy as np\nimport tensorflow as tf\nimport sklearn.metrics as skm\nfrom collections import defaultdict\nfrom tensorflow.keras import Model, regularizers\nfrom tensorflow.python.data.ops.dataset_ops import DatasetV2 as Dataset\nfrom tensorflow.python.keras import backend as keras_backend\nfrom tensorflow.python.framework.ops import EagerTensor\n\nfrom progress import master_bar, progress_bar\n\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n tf.config.experimental.set_visible_devices(gpus[0], 'GPU')\n tf.config.experimental.set_memory_growth(gpus[0], True)\ntf.debugging.set_log_device_placement(False)\n\n\ndef set_seed(seed: int = 1):\n \"\"\"设置随机种子.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed=seed)\n\n\n# noinspection DuplicatedCode\nclass Learner(object):\n \"\"\"tensorflow trainer.\"\"\"\n\n def __init__(self,\n model: Model,\n train_ds: Dataset,\n valid_ds: Dataset = None,\n valid_batch: int = -1,\n collate_fn=None,\n loss_func: tf.keras.losses.Loss = None,\n optim_func: type = None,\n device: tf.device = tf.device(\"/gpu:0\"),\n batch_size: int = 128,\n wd: float = 1e-5,\n lr: float = 0.01,\n lr_scheduler=None,\n metrics: dict = None,\n train_callbacks: list = None,\n valid_callbacks: list = None):\n self.model = model\n self.train_ds = train_ds\n self.valid_ds = valid_ds\n self.valid_batch = valid_batch\n self.collate_fn = collate_fn\n self.loss_func = loss_func\n self.optim_class = optim_func\n self.batch_size = batch_size\n self.device = device\n self.lr = lr\n self.lr_scheduler = lr_scheduler\n self.wd = wd\n self.metrics = metrics\n self.train_callbacks = train_callbacks if train_callbacks else []\n self.valid_callbacks = valid_callbacks if valid_callbacks else []\n self.train_metric_vals = defaultdict(float)\n self.valid_metric_vals = defaultdict(float)\n self.train_dl_len = self.train_ds_len = self.valid_dl_len = self.valid_ds_len = 0\n self._init()\n\n def _init(self):\n \"\"\"初始化\"\"\"\n # 1. 获取可用的gpu,限制使用第一块gpu,并打开内存增长\n # 需在最开始设置\n # 2. 构建DataLoader\n self.train_dl = self.train_ds \\\n .shuffle(2*self.batch_size) \\\n .batch(self.batch_size) \\\n .prefetch(self.batch_size)\n if hasattr(self.train_dl, '__len__'):\n self.train_dl_len = len(self.train_dl)\n self.train_ds_len = len(self.train_ds)\n if self.valid_ds:\n self.valid_dl = self.valid_ds \\\n .batch(3*self.batch_size) \\\n .prefetch(self.batch_size)\n if hasattr(self.valid_dl, '__len__'):\n self.valid_dl_len = len(self.valid_dl)\n self.valid_ds_len = len(self.valid_ds)\n # 3. 模型\n self.model = self.model\n # 4. 设置优化器(如果没有设置策略,则表示使用默认的AdamW优化器)\n if not self.optim_class:\n self.optim_class = tf.keras.optimizers.Adam\n self.optim_func: tf.optimizers.Optimizer = self.optim_class(learning_rate=self.lr)\n # TODO self.model.parameters(), lr=self.lr, weight_decay=self.wd)\n if self.wd > 0:\n for layer in self.model.layers:\n layer.kernel_regularizer = regularizers.l2(self.wd)\n # 5. 设置损失函数(如果没有设置,则默认使用交叉熵损失)\n if not self.loss_func:\n self.loss_func = tf.keras.losses.BinaryCrossentropy()\n else:\n self.loss_func = self.loss_func\n # 6. 设置metrics\n if not self.metrics:\n self.metrics = dict()\n self.metric_names = []\n self.metric_keys = list(self.metrics.keys())\n for name in [\"loss\"] + self.metric_keys:\n self.metric_names.append(f\"t_{name}\")\n self.metric_names.append(f\"v_{name}\")\n # 7. 
设置学习率策略(如果没有设置策略,则表示不改变学习率,则gamma值为1)\n if not self.lr_scheduler:\n self.lr_scheduler = LrScheduler.get_step_lr(step_size=1, gamma=1)\n else:\n self.lr_scheduler = self.lr_scheduler\n # 8. 显示信息\n logging.info(\"=\" * 80)\n logging.info(f\"learner info: \")\n logging.info(f\"train ds: {self.train_ds_len} samples, \"\n f\"{self.train_dl_len} batches.\")\n if self.valid_ds:\n logging.info(f\"valid ds: {self.valid_ds} samples, \"\n f\"{self.valid_dl} batches.\")\n logging.info(f\"lr: {self.lr}, lr scheduler: {vars(self.lr_scheduler)}\")\n logging.info(f\"weight decay: {self.wd}\")\n logging.info(f\"loss: {self.loss_func}\")\n logging.info(f\"optim: {self.optim_func}\")\n logging.info(f\"batch size: {self.batch_size}\")\n logging.info(f\"metrics: {self.metrics}\")\n logging.info(f\"train callbacks: {self.train_callbacks}\")\n logging.info(f\"valid callbacks: {self.valid_callbacks}\")\n logging.info(f\"collate_fn: {self.collate_fn}\")\n logging.info(\"=\" * 80)\n\n # noinspection DuplicatedCode\n def train(self, epochs):\n \"\"\"训练\"\"\"\n # 打印展示的指标名\n mb = master_bar(range(epochs))\n mb.write([\"epoch\"] + self.metric_names + [\"lr\", \"time\"], table=True)\n # 开始训练\n total_batch = 0\n info = dict()\n for callback in self.train_callbacks:\n callback.on_train_begin(info)\n for epoch in mb:\n info[\"epoch\"] = epoch\n epoch_start_time = time.time()\n # 开始第epoch个训练\n for callback in self.train_callbacks:\n callback.on_epoch_begin(info)\n train_loss = 0\n valid_loss = 0\n batch_idx = 0\n self.train_metric_vals.clear()\n for (x, y) in progress_bar(self.train_dl,\n total=self.train_dl_len if self.train_dl_len else 0,\n parent=mb):\n info[\"x\"], info[\"y\"] = x, y\n # 开始第batch_idx批次的训练\n for callback in self.train_callbacks:\n callback.on_batch_begin(info)\n # 数据listy\n if not isinstance(info[\"x\"], (tuple, list)):\n info[\"x\"] = [info[\"x\"]]\n if not isinstance(info[\"y\"], (tuple, list)):\n info[\"y\"] = [info[\"y\"]]\n with tf.GradientTape() as tape:\n # 模型计算\n info[\"outputs\"] = self.model(*info[\"x\"])\n # 计算损失\n for callback in self.train_callbacks:\n callback.on_loss_begin(info)\n loss = self.loss_func(*info[\"y\"], info[\"outputs\"])\n train_loss += loss.numpy()\n # 记录当前的训练的损失\n mb.child.comment = f\"train loss: {train_loss / (batch_idx + 1):.4f}, \" \\\n f\"valid loss: {valid_loss:.4f}\"\n # 梯度回传\n for callback in self.train_callbacks:\n callback.on_backward_begin(info)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n # 梯度更新\n for callback in self.train_callbacks:\n callback.on_step_begin(info)\n self.optim_func.apply_gradients(zip(gradients, self.model.trainable_variables))\n # metric\n for callback in self.train_callbacks:\n callback.on_metric_begin(info)\n for metric_name, metric in self.metrics.items():\n self.train_metric_vals[metric_name] += metric(info[\"outputs\"], *info[\"y\"])\n # writer valid\n total_batch += 1\n if self.valid_batch > 0 and total_batch % self.valid_batch == 0:\n valid_loss = self._valid(mb)\n mb.child.comment = f\"train loss: {train_loss / (batch_idx + 1):.4f}, \" \\\n f\"valid loss: {valid_loss:.4f}\"\n for callback in self.train_callbacks:\n callback.on_batch_end(info)\n batch_idx += 1\n for callback in self.train_callbacks:\n callback.on_epoch_end(info)\n # 更新指标\n if not hasattr(self.train_dl, '__len__') or len(self.train_dl) == 0:\n self.train_dl_len = batch_idx + 1\n train_loss = train_loss / self.train_dl_len\n for metric_name in self.train_metric_vals.keys():\n self.train_metric_vals[metric_name] /= self.train_dl_len\n valid_loss = 
self._valid(mb)\n # logging\n epoch_end_time = time.time()\n log_info = [str(epoch), f\"{train_loss:.4f}\", f\"{valid_loss:.4f}\"]\n for key in self.metric_keys:\n if isinstance(self.train_metric_vals[key], float):\n log_info.append(f\"{self.train_metric_vals[key]:.4f}\")\n log_info.append(f\"{self.valid_metric_vals[key]:.4f}\")\n else:\n log_info.append(str(self.train_metric_vals[key]))\n log_info.append(str(self.valid_metric_vals[key]))\n log_info.append(f\"{self.optim_func.lr.numpy():.6f}\")\n log_info.append(f\"{epoch_end_time - epoch_start_time:.4f}\")\n mb.write(log_info, table=True)\n # 更新lr策略\n lr = float(keras_backend.get_value(self.optim_func.lr))\n lr = self.lr_scheduler(epoch, lr)\n keras_backend.set_value(self.optim_func.lr, keras_backend.get_value(lr))\n for callback in self.train_callbacks:\n callback.on_train_end(info)\n\n def _valid(self, mb):\n \"\"\"验证.\"\"\"\n if not self.valid_ds:\n return 0\n valid_loss = 0\n self.valid_metric_vals.clear()\n outputs_list, ys = [], []\n info = dict()\n batch_idx = 0\n for (x, y) in progress_bar(self.valid_dl,\n total=self.valid_dl_len if self.valid_dl_len else 0,\n parent=mb):\n info[\"x\"], info[\"y\"], info[\"batch_idx\"] = x, y, batch_idx\n if not isinstance(x, (tuple, list)):\n info[\"x\"] = [info[\"x\"]]\n if not isinstance(info[\"y\"], (tuple, list)):\n info[\"y\"] = [info[\"y\"]]\n for callback in self.valid_callbacks:\n callback.on_batch_begin(info)\n info[\"outputs\"] = self.model(*info[\"x\"])\n \"\"\"\n if len(info[\"outputs\"].shape) == 0:\n # 有时候最后一个batch的大小只有1,而如果模型返回时,直接squeeze(),则其shape为[]\n # 而我们期待的是[batch_ize],所以此时需要reshape(按理来说应该由模型保证)\n info[\"outputs\"] = tf.reshape(info[\"outputs\"], (-1,))\n \"\"\"\n for callback in self.valid_callbacks:\n callback.on_loss_begin(info)\n info[\"loss\"] = self.loss_func(*info[\"y\"], info[\"outputs\"])\n for callback in self.valid_callbacks:\n callback.on_metric_begin(info)\n valid_loss += info[\"loss\"].numpy()\n outputs_list.append(info[\"outputs\"])\n ys.append(*info[\"y\"])\n batch_idx += 1\n if not hasattr(self.valid_dl, '__len__') or len(self.valid_dl) == 0:\n self.valid_dl_len = batch_idx + 1\n for callback in self.valid_callbacks:\n callback.on_epoch_end(info)\n outputs_list = tf.concat(outputs_list, axis=0)\n ys = tf.concat(ys, axis=0)\n for metric_name in self.metrics.keys():\n self.valid_metric_vals[metric_name] = self.metrics[metric_name](outputs_list, ys)\n valid_loss /= self.valid_dl_len\n return valid_loss\n\n\nclass LrScheduler(object):\n @staticmethod\n def get_step_lr(step_size, gamma=0.1):\n \"\"\"\n Decays the learning rate of each parameter.\n\n example:\n StepLR(optimizer, step_size=30, gamma=0.1)\n # lr = 0.05 if epoch < 30\n # lr = 0.005 if 30 <= epoch < 60\n # lr = 0.0005 if 60 <= epoch < 90\n\n :param step_size: 每step_size个epoch对学习率进行衰减\n :param gamma: 衰减因子 default: 0.1\n :return:\n \"\"\"\n def scheduler(epoch, lr):\n if (epoch+1) % step_size == 0:\n return lr * gamma\n else:\n return lr\n return scheduler\n\n\nclass Metrics(object):\n \"\"\"metrics.\"\"\"\n\n @staticmethod\n def accuracy_score(inputs: EagerTensor,\n targs: EagerTensor,\n axis: int = -1,\n just_score: bool = False):\n \"\"\"Compute accuracy with `targ` when `pred` is bs * n_classes\"\"\"\n inputs, targs = inputs.numpy(), targs.numpy()\n if just_score:\n # 说明inputs为1的score\n inputs = np.stack([1 - inputs, inputs], axis=1)\n preds = inputs.argmax(axis=axis)\n else:\n # 说明inputs为0和1的score\n preds = inputs.argmax(axis=axis)\n acc = skm.accuracy_score(preds.reshape(-1,), targs.reshape(-1,))\n 
return acc\n\n @staticmethod\n def recall_score(inputs: EagerTensor,\n targs: EagerTensor,\n axis: int = -1,\n average: str = 'binary',\n just_score: bool = False):\n \"\"\"Compute recall with `targ` when `pred` is bs * n_classes\"\"\"\n inputs, targs = inputs.numpy(), targs.numpy()\n if just_score:\n # 说明inputs为1的score\n inputs = np.stack([1 - inputs, inputs], axis=1)\n preds = inputs.argmax(axis=axis)\n else:\n # 说明inputs为0和1的score\n preds = inputs.argmax(axis=axis)\n recall = skm.recall_score(preds.reshape(-1,),\n targs.reshape(-1,),\n average=average,\n zero_division=0)\n return recall\n\n @staticmethod\n def precision_score(inputs: EagerTensor,\n targs: EagerTensor,\n axis: int = -1,\n average: str = 'binary',\n just_score: bool = False):\n \"\"\"Compute precision with `targ` when `pred` is bs * n_classes\"\"\"\n inputs, targs = inputs.numpy(), targs.numpy()\n if just_score:\n # 说明inputs为1的score\n inputs = np.stack([1 - inputs, inputs], axis=1)\n preds = inputs.argmax(axis=axis)\n else:\n # 说明inputs为0和1的score\n preds = inputs.argmax(axis=axis)\n precision = skm.precision_score(preds.reshape(-1,),\n targs.reshape(-1,),\n average=average,\n zero_division=0)\n return precision\n\n @staticmethod\n def f1_score(inputs: EagerTensor,\n targs: EagerTensor,\n axis: int = -1,\n average: str = 'binary',\n just_score: bool = False):\n \"\"\"Compute f1 score with `targ` when `pred` is bs * n_classes\"\"\"\n inputs, targs = inputs.numpy(), targs.numpy()\n if just_score:\n # 说明inputs为1的score\n inputs = np.stack([1 - inputs, inputs], axis=1)\n preds = inputs.argmax(axis=axis)\n else:\n # 说明inputs为0和1的score\n preds = inputs.argmax(axis=axis)\n f1 = skm.f1_score(preds.reshape(-1,),\n targs.reshape(-1,),\n average=average,\n zero_division=0)\n return f1\n\n @staticmethod\n def auc_roc_score(outputs: EagerTensor,\n targs: EagerTensor):\n \"\"\"计算auc(area under the curve)(只适用于二分类).\n\n :param outputs: (np.ndarray)预测概率值(batchsize,)\n :param targs: (np.ndarray)标签(batchsize,)\n \"\"\"\n\n def roc_curve(predicts: np.ndarray,\n targets: np.ndarray):\n \"\"\"计算receiver operator characteristic (ROC)曲线. 先得到不同阈值下的TPR和FPR\n (针对sigmoid的输出).\n\n :param predicts: (np.ndarray)预测概率值(batchsize,)\n :param targets: (np.ndarray)标签(batchsize,)\n \"\"\"\n # 设outputs和targs的格式分别为[0.1, 0.8, 0.6, 0.3]和[1, 1, 1, 0]\n # 1. 根据input的概率值对input和targ进行从高到低重新排序\n desc_score_indices = np.argsort(-predicts)\n predicts = predicts[desc_score_indices]\n targets = targets[desc_score_indices]\n # 2. roc曲线不是每个点都要记录,只需记录有值的点即可,以下threshold_idxs为有值点下标\n diffs = predicts[1:] - predicts[:-1]\n distinct_indices = np.nonzero(diffs)[0]\n threshold_idxs = np.concatenate(\n (distinct_indices, [len(targets) - 1]))\n # 3. 计算tps(true positives sum)/fps(false positives sum)\n # fps的计算:threshold_idxs的值ele,表示只有前ele+1个元素被认为是正样本(下标从0开始)\n # 所以fps = threshold_idxs + 1 - tps (ele+1个元素不是正样本就是负样本)\n tps = np.cumsum(targets)[threshold_idxs]\n fps = threshold_idxs + 1 - tps\n if tps[0] != 0 or fps[0] != 0:\n tps = np.concatenate(([0], tps))\n fps = np.concatenate(([0], fps))\n # 4. 计算tpr(true positive rate)/fpr(false positive rate)\n fpr_ = fps.astype(np.float) / (fps[-1] + 1e-8)\n tpr_ = tps.astype(np.float) / (tps[-1] + 1e-8)\n return fpr_, tpr_\n\n inputs, targs = outputs.numpy(), targs.numpy()\n # 1. 计算fpr和tpr\n fpr, tpr = roc_curve(outputs, targs)\n # 2. 
计算auc\n # fpr为横坐标,tpr为纵坐标,通过计算每一小块矩形的面积(xi*yi),再相加得到auc\n # diffs为一系列小矩形的宽:[x1, x2, ...., xn]\n widths = fpr[1:] - fpr[:-1]\n heights = (tpr[:-1] + tpr[1:]) / 2\n auc = (widths * heights).sum()\n return auc\n\n\nclass Callback(object):\n \"\"\"Base class for callbacks that want to record values, dynamically change learner params, etc.\"\"\"\n\n def on_train_begin(self, info: dict):\n pass\n\n def on_epoch_begin(self, info: dict):\n pass\n\n def on_batch_begin(self, info: dict):\n pass\n\n def on_loss_begin(self, info: dict):\n pass\n\n def on_backward_begin(self, info: dict):\n pass\n\n def on_step_begin(self, info: dict):\n pass\n\n def on_metric_begin(self, info: dict):\n pass\n\n def on_batch_end(self, info: dict):\n pass\n\n def on_epoch_end(self, info: dict):\n pass\n\n def on_train_end(self, info: dict):\n pass\n","repo_name":"miny0401/deep_learner","sub_path":"learner/tf_learner.py","file_name":"tf_learner.py","file_ext":"py","file_size_in_byte":19905,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"6071686172","text":"def distance(move):\n return int(move[1:])\n\ndef trace(wire):\n x = 0;\n y = 0;\n trace = []\n for move in wire:\n dy = 0;\n dx = 0;\n dist = distance(move)\n print(f\"{move} = {dist}\");\n if move[0]=='D':\n for i in range(1, dist+1):\n dy = -i;\n trace.append(f\"{x+dx},{y+dy}\")\n if move[0]=='U':\n for i in range(1, dist+1):\n dy = i;\n trace.append(f\"{x+dx},{y+dy}\")\n if move[0]=='L':\n for i in range(1, dist+1):\n dx = -i;\n trace.append(f\"{x+dx},{y+dy}\")\n if move[0]=='R':\n for i in range(1, dist+1):\n dx = i;\n trace.append(f\"{x+dx},{y+dy}\")\n x = x + dx\n y = y + dy\n return trace\n\ndef manhattan(point):\n x,y = point.split(',');\n return (abs(int(x))+abs(int(y)));\n\nwith open(\"data/wires\") as f:\n lines = f.readlines()\n wires = []\n traces = []\n for line in lines:\n wire = line.strip().split(\",\")\n wires.append(wire)\n for wire in wires:\n traces.append(trace(wire));\n\n overlap = list(set(traces[0]) & set(traces[1]))\n min_dist = 100000000000000000000000\n min_point = ''\n print(overlap)\n for point in overlap:\n m = manhattan(point)\n if m > 0 and m < min_dist:\n min_dist = m\n min_point = point\n print(f\"{point} {m} {min_dist}\")\n\n print(f\"MANHATTAN = {min_dist} @ {min_point}\")\n \n min_steps = 100000000000000000000000\n for point in overlap:\n a = traces[0].index(point)+1;\n b = traces[1].index(point)+1;\n steps = a+b\n print(f\"{steps} = {a} {b}\")\n if steps < min_steps and steps > 0 :\n min_steps = a+b\n\n\n print(f\"MIN STEPS = {min_steps}\")\n","repo_name":"agarithm/2019_AOC","sub_path":"03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22768317169","text":"\nimport pygame\nimport math\nimport random\nimport time\nfrom itertools import cycle\n\npygame.init()\n# pygame.mixer.init()\n\n\nscreen = pygame.display.set_mode((800, 600))\npygame.display.set_caption('Planes')\n\n# setting clock\nclock = pygame.time.Clock()\n\n# colors:\ncolors = cycle(((0, 255, 0), (10, 255, 0), (20, 255, 0), (30, 255, 0), (40, 255, 0), (50, 255, 0), (60, 255, 0), (70, 255, 0), (80, 255, 0), (90, 255, 0), (100, 255, 0), (110, 255, 0), (120, 255, 0), (130, 255, 0), (140, 255, 0), (150, 255, 0), (160, 255, 0), (170, 255, 0), (180, 255, 0), (190, 255, 0), (200, 255, 0), (210, 255, \n0), (220, 255, 0), (230, 255, 0), (240, 255, 0), (250, 255, 0), (255, 255, 0), (255, 245, 
0), (255, 235, 0), (255, 225, 0), (255, 215, 0), (255, 205, 0), (255, 195, 0), (255, 185, 0), (255, 175, 0), (255, 165, 0), (255, 155, 0), (255, 145, 0), (255, 135, 0), (255, 125, 0), (255, 115, 0), (255, 105, 0), (255, 95, 0), (255, 85, 0), (255, 75, 0), (255, 65, 0), (255, 55, 0), (255, 45, 0), (255, 35, 0), (255, 25, 0), (255, 15, 0), (255, 5, 0), (255, 0, 0), (255, 0, 10), (255, 0, 20), (255, 0, 30), (255, 0, 40), (255, 0, 50), (255, 0, 60), (255, 0, 70), (255, 0, 80), (255, 0, 90), (255, 0, 100), (255, 0, 110), (255, 0, 120), (255, 0, 130), (255, 0, 140), (255, 0, 150), (255, 0, 160), (255, 0, 170), (255, 0, 180), (255, 0, 190), (255, 0, 200), (255, 0, 210), (255, 0, 220), (255, 0, 230), (255, 0, 240), (255, 0, 250), (255, 0, 255), (245, 0, 255), (235, 0, 255), (225, 0, 255), (215, 0, 255), (205, 0, 255), (195, 0, 255), (185, 0, 255), (175, \n0, 255), (165, 0, 255), (155, 0, 255), (145, 0, 255), (135, 0, 255), (125, 0, 255), (115, 0, 255), (105, 0, 255), (95, 0, 255), (85, 0, 255), (75, 0, 255), (65, 0, 255), (55, 0, 255), (45, 0, 255), (35, 0, 255), (25, 0, 255), (15, 0, 255), (5, 0, 255), (0, 0, 255), (0, 10, 255), (0, 20, 255), (0, 30, 255), (0, 40, \n255), (0, 50, 255), (0, 60, 255), (0, 70, 255), (0, 80, 255), (0, 90, 255), (0, 100, 255), (0, 110, 255), (0, 120, 255), (0, 130, 255), (0, 140, 255), (0, 150, 255), (0, 160, 255), (0, 170, 255), (0, 180, 255), (0, 190, 255), (0, 200, 255), (0, 210, 255), (0, 220, 255), (0, 230, 255), (0, 240, 255), (0, 250, 255)))\n\n# game configurations\nstraight = True\nrandomy = False\neavoid = False\noavoid = False\nb_avoid = False\nb_follow = False\npfollow = False\nefollow = False\nwallhax = False\nyreflect = False\nxreflect = False\ndeflect = False\nphase = False\nlines = True\nshort = True\ndrawobj = True\nportal = False\n\nclass gameobject():\n\tdef __init__(self, image, x, y, angle):\n\t\tself.ox = x\n\t\tself.oy = y\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.image = image\n\t\tself.rotated_image = self.image\n\t\tself.ded = False\n\t\tself.angle = angle\n\n\tdef rotateright(self ):\n\t\tself.angle -= 2\n\t\tself.angle %= 360\n\n\tdef rotateleft(self ):\n\t\tself.angle += 2\n\t\tself.angle %= 360\n\n\tdef setpos(self, diff):\n\t\tself.x += diff*(math.cos(math.radians(self.angle)))\n\t\tself.y -= diff*(math.sin(math.radians(self.angle)))\n\n\tdef updategame(self ):\n\t\tif self.y <= dy[0] or self.y >= dy[1]:\n\t\t\tif self.y <= dy[0]:\n\t\t\t\tself.y = dy[1]\n\t\t\telse:\n\t\t\t\tself.y = dy[0]\n\t\telif self.x <= dx[0] or self.x >= dx[1]:\n\t\t\tif self.x <= dx[0]:\n\t\t\t\tself.x = dx[1]\n\t\t\telse:\n\t\t\t\tself.x = dx[0]\n\t\tif self.ded == False and drawobj:\n\t\t\tself.rotated_image = pygame.transform.rotate(self.image, self.angle)\n\t\t\tscreen.blit(self.rotated_image, (self.x - int(self.rotated_image.get_width()/2), self.y - int(self.rotated_image.get_height()/2)))\n\t\telif drawobj and self.ded:\n\t\t\tself.ded = False\n\t\t\tself.x, self.y = self.ox, self.oy\n\n# class bulletobject\nclass bulletobject():\n\tdef __init__(self, image, enemy, origin, bulletspeed):\n\t\tself.image = image\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.enemy = enemy\n\t\tself.origin = origin\n\t\tself.bulletspeed = bulletspeed\n\t\tself.xbulletspeed = bulletspeed\n\t\tself.angle = self.origin.angle\n\t\tself.ready = False\n\n\n\tdef collisioncheck(self):\n\t\t# distance to self\n\t\todistance = math.sqrt((math.pow(self.x - self.origin.x, 2)) + (math.pow(self.y - self.origin.y, 2)))\n\t\t# distance to enemy\n\t\tedistance = math.sqrt((math.pow(self.x - 
self.enemy.x, 2)) + (math.pow(self.y - self.enemy.y, 2)))\n\t\t# distance travelled by a bullet in one frame plus 40\n\t\tdist = abs(self.bulletspeed)*delta\n\n\t\tif len(ready) > 1 and (b_avoid or b_follow or deflect or lines):\n\t\t\t# distance to next bullet\n\t\t\tliveb1 = [(i.x , i.y) for i in ready if i != self]\n\t\t\tliveb2 = [math.sqrt((math.pow(self.x - i[0], 2)) + (math.pow(self.y - i[1], 2))) for i in liveb1]\n\t\t\tliveb4 = [i for i in ready if i!= self]\n\n\t\t\tif lines and short:\n\t\t\t# draws line btw closest bullet\n\t\t\t\tliveb3 = {i:j for i, j in zip(liveb2, liveb1)}\n\t\t\t\tx, y = liveb3[min(liveb2)]\n\t\t\t\tpygame.draw.line(screen, color, (self.x + 12.5, self.y + 12.5), (x + 12.5, y + 12.5))\n\n\n\t\t\tif b_avoid or b_follow or deflect or (lines and not short):\n\t\t\t\tfor i, j, k in zip(liveb1, liveb2, liveb4):\n\t\t\t\t\tif lines and not short:\n\t\t\t\t\t\tpygame.draw.line(screen, color, (self.x + 12.5, self.y + 12.5), (i[0] + 12.5, i[1] + 12.5))\n\t\t\t\t\tif b_avoid:\n\t\t\t\t\t# avoiding other bullets\n\t\t\t\t\t\tif j <= 25:\n\t\t\t\t\t\t\tif self.x >= i[0]:\n\t\t\t\t\t\t\t\tself.x += dist\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.x -= dist\n\t\t\t\t\t\t\tif self.y >= i[1]:\n\t\t\t\t\t\t\t\tself.y += dist\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.y -= dist\n\t\t\t\t\tif b_follow:\n\t\t\t\t\t# bullets follow other bullets\n\t\t\t\t\t\tif j <= 18:\n\t\t\t\t\t\t\tif self.x >= i[0]:\n\t\t\t\t\t\t\t\tself.x -= dist\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.x += dist\n\t\t\t\t\t\t\tif self.y >= i[1]:\n\t\t\t\t\t\t\t\tself.y -= dist\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.y += dist\n\t\t\t\t\tif deflect:\n\t\t\t\t\t\tif j <= 25:\n\t\t\t\t\t\t\t# if self.x < i[0]:\n\t\t\t\t\t\t\t# \tif self.bulletspeed > 0:\n\t\t\t\t\t\t\t# \t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t# \tif self.xbulletspeed < 0:\n\t\t\t\t\t\t\t# \t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\t\t\t# if self.y < i[1]:\n\t\t\t\t\t\t\t# \tif self.bulletspeed > 0:\n\t\t\t\t\t\t\t# \t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t# \tif self.bulletspeed < 0:\n\t\t\t\t\t\t\t# \t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\t\t\tself.angle = abs(self.angle - k.angle)\n\n\t\tif yreflect:\n\t\t\t# bullets reflect at y boundary\n\t\t\tif self.y - 5 <= yr[0]:\n\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\telif self.y + 5 >= yr[1] :\n\t\t\t\tself.bulletspeed = -self.bulletspeed\n\n\t\tif xreflect:\n\t\t\t# bullets reflect at x boundary\n\t\t\tif self.x - 5 <= xr[0]:\n\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\telif self.x + 5 >= xr[1]:\n\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\n\t\tif eavoid:\n\t\t\t# enemy avoiding behavior\n\t\t\tif edistance <= 50:\n\t\t\t\tif deflect:\n\t\t\t\t\t# delfects outside enemy barrier\n\t\t\t\t\tif self.x < self.enemy.x:\n\t\t\t\t\t\tif self.bulletspeed > 0:\n\t\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.xbulletspeed < 0:\n\t\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\tif self.y < self.enemy.y:\n\t\t\t\t\t\tif self.bulletspeed > 0:\n\t\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.bulletspeed < 0:\n\t\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\n\t\t\t\tif self.x >= self.enemy.x:\n\t\t\t\t\tself.x += dist\n\t\t\t\telse:\n\t\t\t\t\tself.x -= dist\n\t\t\t\tif self.y >= self.enemy.y:\n\t\t\t\t\tself.y += dist\n\t\t\t\telse:\n\t\t\t\t\tself.y -= dist\n\n\t\tif 
oavoid:\n\t\t\t# origin avoiding behavior\n\t\t\tif odistance < 50:\n\t\t\t\tif deflect or xreflect or yreflect:\n\t\t\t\t\t# delfects outside barrier\n\t\t\t\t\tif self.x < self.origin.x:\n\t\t\t\t\t\tif self.bulletspeed > 0:\n\t\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.xbulletspeed < 0:\n\t\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\tif self.y < self.origin.y:\n\t\t\t\t\t\tif self.bulletspeed > 0:\n\t\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.bulletspeed < 0:\n\t\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\n\t\t\t\tif self.x >= self.origin.x:\n\t\t\t\t\tself.x += dist\n\t\t\t\telse:\n\t\t\t\t\tself.x -= dist\n\t\t\t\tif self.y >= self.origin.y:\n\t\t\t\t\tself.y += dist\n\t\t\t\telse:\n\t\t\t\t\tself.y -= dist\n\n\t\tif efollow:\n\t\t\t# enemy following behavior\n\t\t\tif edistance >= 90:\n\t\t\t\tif deflect or xreflect or yreflect:\n\t\t\t\t\t# delfects within barrier\n\t\t\t\t\tif self.x > self.enemy.x and self.xbulletspeed > 0:\n\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\telif self.x < self.enemy.x and self.xbulletspeed < 0:\n\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\tif self.y > self.enemy.y and self.bulletspeed > 0:\n\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\telif self.y < self.enemy.y and self.bulletspeed < 0:\n\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\n\t\t\t\tif self.x > self.enemy.x:\n\t\t\t\t\tself.x -= dist\n\t\t\t\telif self.enemy.x < self.x:\n\t\t\t\t\tself.x += dist\n\t\t\t\tif self.y > self.enemy.y:\n\t\t\t\t\tself.y -= dist\n\t\t\t\telif self.enemy.y < self.y:\n\t\t\t\t\tself.y += dist\n\n\t\tif pfollow:\n\t\t\t# player following behavior\n\t\t\tif odistance >= 90:\n\t\t\t\tif deflect or xreflect or yreflect:\n\t\t\t\t\t# deflects within barrier\n\t\t\t\t\tif self.x > self.origin.x and self.xbulletspeed > 0:\n\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\telif self.x < self.origin.x and self.xbulletspeed < 0:\n\t\t\t\t\t\tself.xbulletspeed = -self.xbulletspeed\n\t\t\t\t\tif self.y > self.origin.y and self.bulletspeed > 0:\n\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\t\telif self.y < self.origin.y and self.bulletspeed < 0:\n\t\t\t\t\t\tself.bulletspeed = -self.bulletspeed\n\t\t\t\tif self.x > self.origin.x:\t\t\n\t\t\t\t\tself.x -= dist\n\t\t\t\telif self.x < self.origin.x:\n\t\t\t\t\tself.x += dist\n\t\t\t\tif self.y > self.origin.y:\n\t\t\t\t\tself.y -= dist\n\t\t\t\telif self.y < self.origin.y:\n\t\t\t\t\tself.y += dist\n\t\tif wallhax:\n\t\t\t# prevents bullets going past edge\n\t\t\tif self.y <= yr[0] - 3 or self.y >= yr[1] + 3:\n\t\t\t\tif self.y <= yr[0] - 3:\n\t\t\t\t\tself.y += dist + 3\n\t\t\t\telse:\n\t\t\t\t\tself.y -= dist + 3\n\t\t\telif self.x <= xr[0] - 3 or self.x >= xr[1] + 3:\n\t\t\t\tif self.x <= xr[0] - 3:\n\t\t\t\t\tself.x += dist + 3\n\t\t\t\telse:\n\t\t\t\t\tself.x -= dist + 3\n\t\tif phase == False: \n\t\t\t# deletes bullet and ship\n\t\t\tif edistance <= 10:\n\t\t\t\tself.ready = False\n\t\t\t\tself.enemy.ded = True\n\t\t\t\tscore()\n\t\t\t\tready.remove(self)\n\n\tdef setstart(self, x, y):\n\t\tself.x = x - int(self.origin.rotated_image.get_width()/2)\n\t\tself.y = y - int(self.origin.rotated_image.get_height()/2)\n\t\tself.ready = True\n\n\tdef fire(self):\n\t\tif portal:\n\t\t\t# moves to opposite edge\n\t\t\tif self.y <= dy[0] or self.y >= dy[1]:\n\t\t\t\tif self.y <= dy[0]:\n\t\t\t\t\tself.y = dy[1]\n\t\t\t\telse:\n\t\t\t\t\tself.y = 
dy[0]\n\t\t\telif self.x <= dx[0] or self.x >= dx[1]:\n\t\t\t\tif self.x <= dx[0]:\n\t\t\t\t\tself.x = dx[1]\n\t\t\t\telse:\n\t\t\t\t\tself.x = dx[0]\n\t\telse:\n\t\t\t# deletes bullet at edges\n\t\t\tif self.y <= dy[0] or self.y >= dy[1]:\n\t\t\t\tself.ready = False\n\t\t\t\tready.remove(self)\n\t\t\telif self.x <= dx[0] or self.x >= dx[1]:\n\t\t\t\tself.ready = False\n\t\t\t\tready.remove(self)\n\n\t\tif straight:\n\t\t\t# staight line\n\t\t\tself.y += self.bulletspeed*delta*(math.sin(math.radians(self.angle)))\n\t\t\tself.x -= self.xbulletspeed*delta*(math.cos(math.radians(self.angle)))\n\t\tif randomy:\n\t\t\t# random direction\n\t\t\tself.x += random.randint(- 3, 3)\n\t\t\tself.y -= random.randint(- 3, 3)\n\n\t\tself.collisioncheck()\n\n\t\tif drawobj:\n\t\t\tscreen.blit( self.image, (self.x, self.y))\n\n\n\n\nyellow = pygame.transform.scale(pygame.image.load('paper-plane.png'), (25, 25))\npurple = pygame.transform.scale(pygame.image.load('paper-plane - Copy.png'), (25, 25))\n\n# plane\nplayer = gameobject(yellow, 375, 485, angle = 270)\n# enemy plane\nenemy = gameobject(purple, 375, 115, angle = 90)\n\n\n\n# setting parameters for all bullets going to be created\nsx = 25\nsy = 25\n\nim2 = pygame.transform.scale(pygame.image.load('rec.png'), (sx, sy))\n\nim1 = pygame.transform.scale(pygame.image.load('rec - Copy.png'), (sx, sy))\nxr = (0, 775)\nyr = (0, 575)\ndx = (-30, 815)\ndy = (-30, 615)\n\nbs = 150\n\n# formatting configuration menu\ndef multlines(text, configs, fontsize):\n\ttext = text.replace('True', 'ON').replace('False', 'OFF').splitlines()\n\tfor i, j in enumerate(text):\n\t\tif j[-1] == 'N':\n\t\t\tscreen.blit(configs.render(j, True, (128,255,102)), (0, fontsize*i))\n\t\telse:\n\t\t\tscreen.blit(configs.render(j, True, (255, 255, 120)), (0, fontsize*i))\n\ndef update():\n\tglobal ppoint, epoint, scoreswitch\n\tif pyupdate:\n\t\t# drawing backround\n\t\tscreen.fill((30, 20, 30))\n\n\t# # draws scoreboard, configs\n\tif scoreswitch:\n\t\tstat = status.render(f' YELLOW: {ppoint} PURPLE: {epoint}', True, (255 , 50, 225))\n\t\tscreen.blit( stat, (0, 0))\n\tif configuration:\n\t\tmultlines(text, configs, 12)\n\t# bullets getting fired\n\tfor i in ready:\n\t\ti.fire()\n\t# update player,enemy position\n\tplayer.updategame()\n\tenemy.updategame()\n\t# creates frame in window\n\tpygame.display.update()\n\n# updates scoreboard\ndef score():\n\tglobal ppoint, epoint\n\tif player.ded == True:\n\t\tepoint += 1\n\telse:\n\t\tppoint += 1\n\n\ndef checksign(check):\n\tif check > 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\nstatus = pygame.font.Font('freesansbold.ttf',32)\nconfigs = pygame.font.Font('freesansbold.ttf',12)\n\npyupdate = True\nscoreswitch = True\nconfiguration = True\nsound = False\nready = []\n\n# points\nppoint = 0\nepoint = 0\n\nrun = True\nepress = True\nppress = True\nconfigcheck = True\ncount = 1\nwhile run:\n\t# returns each event in keyboard\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trun = False\n\t\t\tcontinue\n\t\t\n\t# updating delta value and setting frame rate\n\tdelta = clock.tick(60)/1000\n\t# updating game configs \n\ttext = f'[1]straight: {straight}\\n[2]random: {randomy}\\n[3]enemyavoid: {eavoid}\\n[4]playersavoid: {oavoid}\\n[5]bulletavoid: {b_avoid}\\n[6]bulletfollow: {b_follow}\\n[7]enemyfollow: {efollow}\\n[8]playerfollow: {pfollow}\\n[9]wallborder {wallhax}\\n[0]yreflect: {yreflect}\\n[F1]xreflect: {xreflect}\\n[F2]deflectbullets: {deflect}\\n[F3]phasebullets: {phase}\\n[F4]/[i]short, lines: {short} 
{lines}\\n[F5]drawobjs: {drawobj}\\n[F6]portal: {portal}\\n[F7]updatesc: {pyupdate}\\n[F8]scoreboard: {scoreswitch}\\n[F9]configs: {configuration}\\n[F10]soundeffect: {sound}\\n[\\]exit\\n[-]/[+]bulletspd: {round(bs, 1)}\\nfps: {round(clock.get_fps(), 2)}, delta: {delta}\\nx, y : {int(player.x)}, {int(player.y)}\\nlivebullets: {len(ready)}\\nangle: {player.angle}'\n\t# updating color\n\tif count % 2 == 0:\n\t\tcolor = next(colors)\n\tcount += 1\n\t# controlling ships\n\tkeys = pygame.key.get_pressed()\n\n\t# allows keypresses 1 - K12 to change game settings\n\tif configcheck:\n\t\tif keys[pygame.K_i]:\n\t\t\tshort = not short\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_1]:\n\t\t\tstraight = not straight\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_2]:\n\t\t\trandomy = not randomy\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_3]:\n\t\t\teavoid = not eavoid\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_4]:\n\t\t\toavoid = not oavoid\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_5]:\n\t\t\tb_avoid = not b_avoid\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_6]:\n\t\t\tb_follow = not b_follow\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_7]:\n\t\t\tefollow = not efollow\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_8]:\n\t\t\tpfollow = not pfollow\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_9]:\n\t\t\twallhax = not wallhax\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_0]:\n\t\t\tyreflect = not yreflect\n\t\t\tconfigcheck = False\t\n\n\t\telif keys[pygame.K_F1]:\n\t\t\txreflect = not xreflect\n\t\t\tconfigcheck = False\t\n\t\t\n\t\telif keys[pygame.K_F2]:\n\t\t\tdeflect = not deflect\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_F3]:\n\t\t\tphase = not phase\n\t\t\tconfigcheck = False\t\n\n\t\telif keys[pygame.K_F4]:\n\t\t\tlines = not lines\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_F5]:\n\t\t\tdrawobj = not drawobj\n\t\t\tconfigcheck = False\n\t\n\t\telif keys[pygame.K_F6]:\n\t\t\tportal = not portal\n\t\t\tconfigcheck = False\n\t\t\n\t\telif keys[pygame.K_F7]:\n\t\t\tpyupdate = not pyupdate\n\t\t\tconfigcheck = False\n\t\t\n\t\telif keys[pygame.K_F8]:\n\t\t\tscoreswitch = not scoreswitch\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_F9]:\n\t\t\tconfiguration = not configuration\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_F10]:\n\t\t\tsound = not sound\n\t\t\tconfigcheck = False\n\t\t\n\t\telif keys[pygame.K_MINUS]:\n\t\t\tbs -= 1\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_EQUALS]:\n\t\t\tbs += 1\n\t\t\tconfigcheck = False\n\n\t\telif keys[pygame.K_BACKSLASH]:\n\t\t\trun = False\n\t\t\tcontinue\n\n\n\t# enemy ship - WASD, player ship - arrow keys\n\tif keys[pygame.K_w]:\n\t\tenemy.setpos(-150*delta)\n\n\tif keys[pygame.K_a]:\n\t\tenemy.rotateleft()\n\t\n\tif keys[pygame.K_s]:\n\t\tenemy.setpos(150*delta)\n\n\tif keys[pygame.K_d]:\n\t\tenemy.rotateright()\n\n\tif keys[pygame.K_UP]:\n\t\tplayer.setpos(-150*delta)\n\n\tif keys[pygame.K_LEFT]:\n\t\tplayer.rotateleft()\n\n\tif keys[pygame.K_DOWN]:\n\t\tplayer.setpos(150*delta)\n\n\tif keys[pygame.K_RIGHT]:\n\t\tplayer.rotateright()\n\n\t# controlling bullets: enemy ship - LEFT SHIFT, player ship - RIGHT SHIFT\n\tif keys[pygame.K_RSHIFT] and ppress == True:\n\t\tif sound:\n\t\t\tpygame.mixer.music.load('muda.mp3')\n\t\t\tpygame.mixer.music.play()\n\t\tbulletp = bulletobject(im1,enemy = enemy, origin = player, bulletspeed = bs)\n\t\tbulletp.setstart(player.x, player.y)\n\t\tready.append(bulletp)\n\t\tppress = False\n\tif 
keys[pygame.K_LSHIFT] and epress == True:\n\t\tif sound:\n\t\t\tpygame.mixer.music.load('ora.mp3')\n\t\t\tpygame.mixer.music.play()\t\t\n\t\tbullete = bulletobject(im2,enemy = player, origin = enemy, bulletspeed = bs)\n\t\tbullete.setstart(enemy.x, enemy.y)\n\t\tready.append(bullete)\n\t\tepress = False\n\n\t# activate on release\n\tif event.type == pygame.KEYUP:\n\t\tif event.key == pygame.K_RSHIFT:\n\t\t\tppress = True\n\t\t\tconfigcheck = True\n\t\telif event.key == pygame.K_LSHIFT:\n\t\t\tepress = True\n\t\telif configcheck == False and event.key in [pygame.K_x, pygame.K_i, pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4, pygame.K_4, pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9, pygame.K_0, pygame.K_F1, pygame.K_F2, pygame.K_F3, pygame.K_F4, pygame.K_F5, pygame.K_F6, pygame.K_F7, pygame.K_F8, pygame.K_F9, pygame.K_F10, pygame.K_MINUS, pygame.K_EQUALS, pygame.K_BACKSLASH]:\n\t\t\tconfigcheck = True\n\n\tupdate()\n\n\n\n\n\n\n","repo_name":"Sandwhiches/pygamegame","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":17321,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"74128020301","text":"#-------------\r\n# Bexxkie\r\n# 01-may-2019\r\n# ver 2.0\r\n# QIcoGen\r\n#------------\r\n#\r\n# NOTE: this overwrites existing desktop.ini\r\n#\r\nfrom PIL import Image\r\nimport sys\r\nfrom configparser import RawConfigParser\r\nimport ctypes\r\nimport os\r\n#-- Vars\r\nf = sys.argv[1]\t\t\t\t\t# this will get the dragged file, (if it is not an image, it just closes)\r\ni = Image.open(f)\t\t\t\t# go ahead and load it as an image (otherwise just close like said above)\r\nname = f.split('.')[0]+'.ico'\t# wanna split get the filename and get index 0, so like 001.jpg => [001],[jpg]\r\nico = i.save(name)\t\t\t\t# convert and save the file to the same folder the the dragged image came from\r\ndir = os.path.dirname(f)\r\n#-- Desktop.ini generator\r\nconfig = RawConfigParser()\r\nconfig.optionxform=str\r\ncfFile = open(dir+'\\\\desktop.ini','w')\r\n\r\n# Create the ini sections we want [.ShellClassInfo], [ViewState]\r\nconfig.add_section('.ShellClassInfo')\r\nconfig.add_section('ViewState')\r\n\r\n# Icon Path, autogenerated from the source of the dragged object\r\nconfig.set('.ShellClassInfo','IconResource',name+',0')\r\n# Folder type, should be Video, Generic, Pictures\r\nconfig.set('ViewState','FolderType','Generic')\r\n# write the file to disk (save)\r\nconfig.write(cfFile)\r\n# close the stream\r\ncfFile.close()\r\n# get the dir, not the file\r\nos.chdir(dir)\r\n# set the dir's attributes so windows will use the ini properly\r\nos.system('attrib +S +H desktop.ini')\r\n#\r\n# :Optionals: uncomment to use\r\n# Alerts user when completed (this wont show if the script fails in any way)\r\n#ctypes.windll.user32.MessageBoxW(0, \"Icon created and applied to folder\", \"Done\", 1)\r\n\r\n\r\n#--------\r\n# Extra Information\r\n\r\n# This is what the INI should look like, if it doesnt then change the\r\n#[.ShellClassInfo]\r\n#IconResource=PATH_TO_ICO,0\r\n#[ViewState]\r\n#FolderType= FOLDER TYPE (Pictures, Generic, Video)\r\n\r\n\r\n\r\n# Use if thumbnails are not updated (alternatively rebooting will work)\r\n# Batch file assoc with this script to force thumbnails to be updated\r\n# (run in root folder IE: create icon for Images/CuteCats, run the following script in Images)\r\n#\r\n#\r\n#@echo off\r\n#for /r %%I in (*.ico) do (\r\n# attrib -h -s -r \"%temp%\\desktop.ini\" >nul\r\n# (\r\n# echo 
[.ShellClassInfo]\r\n# echo IconResource=\"%%~nxI\",0\r\n# )>\"%temp%\\desktop.ini\"\r\n# attrib +h +s \"%temp%\\desktop.ini\"\r\n# (\r\n# echo set shell = CreateObject^(\"Shell.Application\"^)\r\n# echo set folder = shell.NameSpace^(\"%%~dpI\"^)\r\n# echo folder.MoveHere \"%temp%\\desktop.ini\", 4+16+1024\r\n# )>\"%temp%\\updateIcon.vbs\"\r\n# cscript //nologo //b \"%temp%\\updateIcon.vbs\"\r\n#)\r\n#pause\r\n","repo_name":"Bexxkie/QIcoGen","sub_path":"ConvertToIcon_Generic.py","file_name":"ConvertToIcon_Generic.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14374468010","text":"TARGET = 600851475143\n\n\ndef getNextPrime(n):\n global prime\n tar = n + 1\n while True:\n if all(map(lambda x: tar % x != 0, primeList)):\n primeList.append(tar)\n return tar\n else:\n tar += 1\n\n\nprime_max = 1\nprimeList = [2]\nprime = 2\ntar = TARGET\nwhile tar != 1:\n if tar % prime == 0:\n tar = tar // prime\n prime_max = prime\n prime = getNextPrime(prime)\n\nprint(prime_max)\n","repo_name":"katataku/Project-Euler","sub_path":"0003/0003.py","file_name":"0003.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21037520148","text":"import asyncio\nimport contextlib\nimport itertools\nimport operator\nimport os\nimport subprocess\nimport types\nfrom concurrent import futures\nfrom functools import partial\nfrom typing import AnyStr, AsyncIterable, Callable, Iterable, Iterator, Optional\n\n__version__ = '1.4'\n\n\nclass futured(partial):\n \"\"\"A partial function which returns futures.\"\"\"\n\n as_completed: Callable = NotImplemented\n\n def __get__(self, instance, owner):\n return self if instance is None else types.MethodType(self, instance)\n\n @classmethod\n def results(cls, fs: Iterable, *, as_completed=False, **kwargs) -> Iterator:\n \"\"\"Generate results concurrently from futures, by default in order.\n\n Args:\n fs: iterable of futures\n as_completed kwargs: generate results as completed with options, e.g., timeout\n \"\"\"\n tasks = cls.as_completed(fs, **kwargs) if (as_completed or kwargs) else list(fs)\n return map(operator.methodcaller('result'), tasks)\n\n @classmethod\n def items(cls, pairs: Iterable, **kwargs) -> Iterator:\n \"\"\"Generate key, result pairs as completed from futures.\n\n Args:\n pairs: key, future pairs\n **kwargs: as completed options, e.g., timeout\n \"\"\"\n keys = dict(map(reversed, pairs)) # type: ignore\n return ((keys[future], future.result()) for future in cls.as_completed(keys, **kwargs))\n\n def map(self, *iterables: Iterable, **kwargs) -> Iterator:\n \"\"\"Asynchronously map function.\n\n Args:\n **kwargs: keyword options for [results][futured.futured.results]\n \"\"\"\n return self.results(map(self, *iterables), **kwargs)\n\n def starmap(self, iterable: Iterable, **kwargs) -> Iterator:\n \"\"\"Asynchronously starmap function.\n\n Args:\n **kwargs: keyword options for [results][futured.futured.results]\n \"\"\"\n return self.results(itertools.starmap(self, iterable), **kwargs)\n\n def mapzip(self, iterable: Iterable, **kwargs) -> Iterator:\n \"\"\"Generate arg, result pairs as completed.\n\n Args:\n **kwargs: keyword options for [items][futured.futured.items]\n \"\"\"\n return self.items(((arg, self(arg)) for arg in iterable), **kwargs)\n\n @classmethod\n @contextlib.contextmanager\n def waiting(cls, *fs, **kwargs):\n \"\"\"Return context manager which 
waits on [results][futured.futured.results].\"\"\"\n fs = list(fs)\n try:\n yield fs\n finally:\n fs[:] = cls.results(fs, **kwargs)\n\n class tasks(set):\n \"\"\"A set of futures which iterate as completed, and can be updated while iterating.\"\"\"\n\n wait = staticmethod(futures.wait)\n TimeoutError = futures.TimeoutError\n\n def __init__(self, fs: Iterable, *, timeout=None):\n super().__init__(fs)\n self.options = dict(return_when='FIRST_COMPLETED', timeout=timeout)\n self.it = self.iter()\n\n def iter(self):\n while self:\n done, _ = self.wait(list(super().__iter__()), **self.options)\n if not done:\n raise self.TimeoutError\n self -= done\n yield from done\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.it)\n\n\nclass executed(futured):\n \"\"\"Extensible base class for callables which require a `submit` method.\"\"\"\n\n as_completed = futures.as_completed\n Executor = futures.Executor\n\n def __new__(cls, *args, **kwargs):\n if args:\n return futured.__new__(cls, cls.Executor().submit, *args, **kwargs)\n return partial(futured.__new__, cls, cls.Executor(**kwargs).submit)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.func.__self__.__exit__(*args)\n\n\nclass threaded(executed):\n \"\"\"A partial function executed in its own thread pool.\"\"\"\n\n Executor = futures.ThreadPoolExecutor\n\n\nclass processed(executed):\n \"\"\"A partial function executed in its own process pool.\"\"\"\n\n Executor = futures.ProcessPoolExecutor\n\n\nwith contextlib.suppress(ImportError):\n\n class distributed(executed):\n \"\"\"A partial function executed by a dask distributed client.\"\"\"\n\n from distributed import as_completed, Client as Executor # type: ignore\n\n\nclass asynced(futured):\n \"\"\"A partial coroutine.\n\n Anywhere futures are expected, coroutines are also supported.\n \"\"\"\n\n @classmethod\n def results(cls, fs: Iterable, *, as_completed=False, **kwargs) -> Iterator:\n if as_completed or kwargs:\n return map(operator.methodcaller('result'), cls.tasks(fs, **kwargs))\n loop = asyncio.new_event_loop()\n tasks = list(map(loop.create_task, fs))\n return map(loop.run_until_complete, tasks)\n\n @staticmethod\n async def pair(key, future):\n return key, await future\n\n @classmethod\n def items(cls, pairs: Iterable, **kwargs) -> Iterator:\n return cls.results(itertools.starmap(cls.pair, pairs), as_completed=True, **kwargs)\n\n def run(self: Callable, *args, **kwargs):\n \"\"\"Synchronously call and run coroutine or asynchronous iterator.\"\"\"\n coro = self(*args, **kwargs)\n return asynced.iter(coro) if isinstance(coro, AsyncIterable) else asyncio.run(coro)\n\n @staticmethod\n def iter(aiterable: AsyncIterable, loop=None):\n \"\"\"Wrap an asynchronous iterable into an iterator.\n\n Analogous to `asyncio.run` for coroutines.\n \"\"\"\n loop = loop or asyncio.new_event_loop()\n anext = aiterable.__aiter__().__anext__\n task = loop.create_task(anext())\n while True:\n try:\n result = loop.run_until_complete(task)\n except StopAsyncIteration:\n return\n task = loop.create_task(anext())\n yield result\n\n class tasks(futured.tasks):\n __doc__ = futured.tasks.__doc__\n TimeoutError = asyncio.TimeoutError # type: ignore\n\n def __init__(self, coros: Iterable, **kwargs):\n self.loop = asyncio.new_event_loop()\n super().__init__(map(self.loop.create_task, coros), **kwargs)\n\n def add(self, coro):\n super().add(self.loop.create_task(coro))\n\n def wait(self, *args, **kwargs):\n return self.loop.run_until_complete(asyncio.wait(*args, 
**kwargs))\n\n\nclass command(subprocess.Popen):\n    \"\"\"Asynchronous subprocess with a future compatible interface.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n\n    def check(self, args, stdout, stderr):\n        if self.returncode:\n            raise subprocess.CalledProcessError(self.returncode, args, stdout, stderr)\n        return stdout\n\n    @classmethod\n    async def coroutine(cls, *args, shell=False, **kwargs):\n        \"\"\"Create a subprocess coroutine, suitable for timeouts.\"\"\"\n        create = asyncio.create_subprocess_shell if shell else asyncio.create_subprocess_exec\n        self = await create(*args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n        return cls.check(self, args, *(await self.communicate()))\n\n    def result(self, **kwargs) -> AnyStr:\n        \"\"\"Return stdout or raise stderr.\"\"\"\n        return self.check(self.args, *self.communicate(**kwargs))\n\n    def pipe(self, *args, **kwargs) -> 'command':\n        \"\"\"Pipe stdout to the next command's stdin.\"\"\"\n        return type(self)(*args, stdin=self.stdout, **kwargs)\n\n    def __or__(self, other: Iterable) -> 'command':\n        \"\"\"Alias of [pipe][futured.command.pipe].\"\"\"\n        return self.pipe(*other)\n\n    def __iter__(self):\n        \"\"\"Return output lines.\"\"\"\n        return iter(self.result().splitlines())\n\n\ndef forked(values: Iterable, max_workers: Optional[int] = None) -> Iterator:\n    \"\"\"Generate each value in its own child process and wait in the parent.\"\"\"\n    max_workers = max_workers or os.cpu_count() or 1  # same default as ProcessPoolExecutor\n    workers: dict = {}\n\n    def wait():\n        pid, status = os.wait()\n        if pid in workers:\n            value = workers.pop(pid)\n            if status:\n                raise OSError(status, value)\n\n    for value in values:\n        while len(workers) >= max_workers:\n            wait()\n        pid = os.fork()\n        if pid:\n            workers[pid] = value\n        else:  # pragma: no cover\n            yield value\n            os._exit(0)\n    while workers:\n        wait()\n\n\ndef decorated(base: type, **decorators: Callable) -> type:\n    \"\"\"Return subclass with decorated methods.\"\"\"\n    namespace = {name: decorators[name](getattr(base, name)) for name in decorators}\n    return type(base.__name__, (base,), namespace)\n","repo_name":"coady/futured","sub_path":"futured/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"1434723347","text":"#!/usr/bin/env python3\nfrom flask import Flask, abort, render_template, redirect\n\nfrom data_churner import *\nfrom config import SITE_TITLE, EVENT_TITLE, GSHEET_KEY\n\n\n_flags = {\n    'gc': '/static/Jersey_yellow.png',\n    'kom': '/static/Jersey_polkadot.png',\n    'qom': '/static/Jersey_polkadot.png',\n    'sprint': '/static/Jersey_green.png',\n}\n\n\napp = Flask(__name__)\n\ngdoc_link = 'https://docs.google.com/spreadsheets/d/{}/'.format(GSHEET_KEY)\n\n\ndef get_raw_results():\n    return parse_data()\n\n\ndef get_results(nmax=5):\n    data = get_raw_results()\n    results = compute_all_ride_results(*data, nmax)\n    overall = compute_overall_totals(results, nmax)\n\n    return data, results, overall\n\n\n@app.route('/')\ndef index():\n    data, results, overall = get_results(5)\n    return render_template(\n        'index.html',\n        overall=overall,\n        stages=results,\n        flags=_flags,\n        site_title=SITE_TITLE,\n        event_title=EVENT_TITLE,\n        gdoc_link=gdoc_link,\n    )\n\n\n@app.route('/stage/<stage_id>')\ndef stage(stage_id):\n    data, results, overall = get_results(3)\n    for stage in results:\n        if stage[0].id == stage_id:\n            return 
render_template(\n                'stage.html',\n                stage=stage[0],\n                intermediate=stage[1],\n                totals=stage[2],\n                flags=_flags,\n                site_title=SITE_TITLE,\n                event_title=EVENT_TITLE,\n                gdoc_link=gdoc_link,\n            )\n    abort(404)\n\n\n@app.route('/reload')\ndef reload():\n    delete_cached_data()\n    parse_data()\n    return redirect('/', code=302)\n\n\n@app.errorhandler(404)\n@app.errorhandler(500)\ndef error(err):\n    return 'not a page :('\n","repo_name":"a-johnston/cleats-racing","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4694134546","text":"# Matrix is a 2 dimensional array or list\nA = [\n    [1, 2, 3],\n    [4, 5, 6]\n]\n\n# printing a specific element of a matrix\nprint(A[0][1])\n\n# setting value of a matrix element\nA[0][1] = 20\n\n# printing all the rows of a matrix\nfor row in A:\n    print(row)\n\n# printing each item of a matrix separately\nfor row in A:\n    for col in row:\n        print(col)","repo_name":"anisul-Islam/python-tutorials-code","sub_path":"Program27.py","file_name":"Program27.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"22000669123","text":"name = 'Sadık Turan'\n\nfor letter in name:\n    if letter == 'a':\n        break # break: stops the loop when it sees the letter 'a' in the word\n    elif letter == 'ı':\n        continue # continue: skips the letter 'ı' and keeps going (cancels the loop\n                 # iteration that hit 'ı' and resumes from where it left off)\n    print(letter)\n\n#------------------------------------------\n# so, what happens if we use continue in a while loop:\n\nx = 0\nwhile x < 5:\n    x += 1\n    if x == 2:\n        break # gets us out of the while loop as soon as 2 comes up\n    print(x)
\n\n\nx = 0\nwhile x < 5:\n    x += 1   # we put x += 1 at the top of the loop, because nothing below continue runs and x would get stuck at 2\n    if x == 2:\n        continue # cancels the current iteration and goes back to the top; since the part below it\n    print(x)     # will not run, we moved the increment of x up to the top\n\n\n#------------------------------------------\n# TEST\n\n\n# 1- sum of the odd numbers up to 100\n\nx = 0\nresult = 0\n\nwhile x < 100:\n    x += 1\n    if x % 2 == 0:\n        continue\n    result += x\n\n\nprint(f'total : {result} ')\n\n","repo_name":"enish0/BTK-Akademi-Python-notes","sub_path":"6 ] Python da Döngüler/5) break ve contune.py","file_name":"5) break ve contune.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"33820096090","text":"# Practice project - Sandwich Maker\n\nimport pyinputplus as pyin\n\nsandwichIngredients = []\nsandwichPrice = 0\n\n# Using inputMenu() for a bread type: wheat, white, or sourdough\nbreadPrices = {\"Wheat\":40, \"White\":35, \"Sourdough\":50}\n\nbreadType = pyin.inputMenu(['Wheat', 'White', 'Sourdough'], numbered=True)\nsandwichIngredients.append(breadType)\nsandwichPrice += breadPrices[breadType]\nprint(\"Selected Option: \" + str(breadType) + \"\\n\")\n\n# Using inputMenu() for a protein type: chicken, turkey, ham, or tofu\nproteinPrice = {\"Chicken\": 90, \"Turkey\":110, \"Ham\":90, \"Tofu\":120}\n\nproteinType = pyin.inputMenu(['Chicken', 'Turkey', 'Ham', 'Tofu'], numbered=True)\nsandwichIngredients.append(proteinType)\nsandwichPrice += proteinPrice[proteinType]\nprint(\"Selected Option: \" + str(proteinType) + \"\\n\")\n\n# Using inputYesNo() to ask if they want cheese.\n# If so, using inputMenu() to ask for a cheese type: cheddar, Swiss, or mozzarella.\ncheeseYN = pyin.inputYesNo(\"Would you like cheese with that? (Y/N)\")\ncheesePrice = {\"Cheddar\":10, \"Swiss\":20, \"Mozzarella\":14}\n\nif cheeseYN == \"yes\":\n\tcheeseType = pyin.inputMenu([\"Cheddar\", 'Swiss', 'Mozzarella'], numbered=True)\n\tsandwichIngredients.append(cheeseType)\n\tsandwichPrice += cheesePrice[cheeseType]\n\tprint(\"Selected Option: \" + str(cheeseType) + \"\\n\")\n\n# Using inputYesNo() to ask if they want mayo, mustard, lettuce, or tomato.\nmayoYN = pyin.inputYesNo(\"Add mayo?\")\n\nif mayoYN == \"yes\":\n\tsandwichIngredients.append(\"Mayo\")\n\tsandwichPrice += 15\n\nmustardYN = pyin.inputYesNo(\"Add mustard?\")\n\nif mustardYN == \"yes\":\n\tsandwichIngredients.append(\"Mustard\")\n\tsandwichPrice += 15\n\nlettuceYN = pyin.inputYesNo(\"Add lettuce?\")\n\nif lettuceYN == \"yes\":\n\tsandwichIngredients.append(\"lettuce\")\n\tsandwichPrice += 10\n\ntomatoYN = pyin.inputYesNo(\"Add tomato?\")\n\nif tomatoYN == \"yes\":\n\tsandwichIngredients.append(\"tomato\")\n\tsandwichPrice += 10\n\n# Using inputInt() to ask how many sandwiches they want. Make sure this number is 1 or more.\nnumOfSandwiches = pyin.inputInt(\"How many sandwiches do you want? 
\", min=1)\n\n# Come up with prices for each of these options, and have your program display a total cost after the user enters their selection.\nprint(\"\\nSandwich: \")\nprint(sandwichIngredients)\nprint(\"Price per sandwich: %s\" % sandwichPrice)\nprint(\"Total Price: %s\" % (sandwichPrice*numOfSandwiches))\n\n\n","repo_name":"Ttibsi/AutomateTheBoringStuff","sub_path":"PracticeProjects/ch8-SandwichMaker.py","file_name":"ch8-SandwichMaker.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18424709612","text":"# %cd /Users/kappamaki/Documents/workspace/advent_of_code2021/day17\ndef read_input(fname):\n line = open(fname).readline().strip().split(\"target area: \")[1]\n x, y = line.split(\", \")\n x = list(map(int, x.split(\"x=\")[1].split(\"..\")))\n y = list(map(int, y.split(\"y=\")[1].split(\"..\")))\n return x, y\n\n\nxlim, ylim = read_input(\"input\")\n\n###############################################################################\nfrom math import copysign\nfrom math import inf\n\n\ndef step(pos, vel):\n pos += vel\n velimag = vel.imag - 1\n if abs(vel.real) > 0:\n velreal = copysign(1, vel.real) * (abs(vel.real) - 1)\n else:\n velreal = vel.real\n return pos, velreal + 1j * velimag\n\n\ndef min_xvel(dist):\n xvel = 1\n while True:\n if xvel * (xvel + 1) / 2 > dist:\n return xvel\n else:\n xvel += 1\n\n\ndef step_until_out(pos, vel, xlim, ylim, verbose=False):\n max_height = pos.imag\n reached_target = (xlim[0] <= pos.real <= xlim[1]) & (ylim[0] <= pos.imag <= ylim[1])\n while pos.imag > ylim[0]:\n pos, vel = step(pos, vel)\n max_height = max(max_height, pos.imag)\n if (xlim[0] <= pos.real <= xlim[1]) & (ylim[0] <= pos.imag <= ylim[1]):\n reached_target = True\n if verbose:\n print(f\"just stepped to pos: {pos}, vel: {vel}, reached: {reached_target}\")\n return max_height, reached_target\n\n\nmaxi = - inf\ncount = 0\nfor xvel in range(min_xvel(xlim[0]), xlim[1] + 1):\n for yvel in range(-1000, 1000):\n pos = 0 + 0j\n vel = xvel + 1j * yvel\n max_height, reached_target = step_until_out(pos, vel, xlim, ylim)\n if reached_target:\n maxi = max(maxi, max_height)\n count += 1\n\nprint(f\"part 1: {maxi}\")\nprint(f\"part 2: {count}\")\n# 710 is too low\n###############################################################################\n","repo_name":"flothesof/advent_of_code2021","sub_path":"day17/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"17994608283","text":"import logging\nimport math\nimport os\nimport sys\nimport time\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Callable, Optional\n\nimport datasets\nimport numpy as np\nfrom datasets import Dataset, load_dataset\nfrom tqdm import tqdm\n\nimport jax\nimport jax.numpy as jnp\nimport optax\nimport transformers\nfrom flax import jax_utils, traverse_util\nfrom flax.jax_utils import unreplicate\nfrom flax.training import train_state\nfrom flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key\nfrom huggingface_hub import Repository, get_full_repo_name\nfrom transformers import (\n CONFIG_MAPPING,\n FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,\n AutoConfig,\n AutoTokenizer,\n FlaxAutoModelForCausalLM,\n HfArgumentParser,\n TrainingArguments,\n is_tensorboard_available,\n set_seed,\n GPT2Config,\n)\nfrom transformers.file_utils import 
get_full_repo_name\nfrom transformers.testing_utils import CaptureLogger\nfrom tokenizers import ByteLevelBPETokenizer\n\n\n\nimport os\nimport pickle\n\nSEED=42\n\nnum_train_epochs = 20\nper_device_train_batch_size = 64\nper_device_eval_batch_size = 64\n\nwarmup_steps = 1000\nlearning_rate = 5e-3\n\nblock_size =512\n\nlogging_steps = 500\nsave_steps = 2500\neval_steps=2500\ncommit_step = 1000\n\nmodel_name = \"gpt2_no\"\noutput_dir = \"gpt2_no\"\n\ndef data_loader(rng, dataset, batch_size, shuffle=False):\n    steps_per_epoch = len(dataset) // batch_size\n\n    if shuffle:\n        batch_idx = np.random.permutation(len(dataset))\n    else:\n        batch_idx = np.arange(len(dataset))\n\n    batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.\n    batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))\n\n    for idx in batch_idx:\n        batch = dataset[idx]\n        batch = {k: np.array(v) for k, v in batch.items()}\n\n        yield batch\n\ndef create_learning_rate_fn(\n    train_ds_size, train_batch_size, num_train_epochs, num_warmup_steps, learning_rate):\n\n    steps_per_epoch = train_ds_size // train_batch_size\n    num_train_steps = steps_per_epoch * num_train_epochs\n    warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)\n    decay_fn = optax.linear_schedule(\n        init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps\n    )\n    schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])\n    return schedule_fn\n\nclass TrainState(train_state.TrainState):\n    dropout_rng: jnp.ndarray\n\n    def replicate(self):\n        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))\n\ndef write_train_metric(summary_writer, train_metrics, train_time, step):\n    summary_writer.scalar(\"train_time\", train_time, step)\n\n    train_metrics = get_metrics(train_metrics)\n    for key, vals in train_metrics.items():\n        tag = f\"train_{key}\"\n        for i, val in enumerate(vals):\n            summary_writer.scalar(tag, val, step - len(vals) + i + 1)\n\ndef write_eval_metric(summary_writer, eval_metrics, step):\n    for metric_name, value in eval_metrics.items():\n        summary_writer.scalar(f\"eval_{metric_name}\", value, step)\n\n\n\ndef main():\n\n\n    logging.basicConfig(filename=\"app.log\", level =logging.INFO)\n    logger = logging.getLogger(__name__)\n\n    jax_devices = jax.device_count()\n\n\n    print(jax.devices())\n\n    print(\"-----setting up huggingface repo------\")\n\n    repo_name = get_full_repo_name(model_name)\n\n    repo = Repository(output_dir, clone_from=repo_name)\n\n\n    print(\"-------- Loading Dataset --------\")\n\n    dataset = load_dataset(\"oscar\", \"unshuffled_deduplicated_no\")\n\n    dataset[\"train\"] = load_dataset(\"oscar\", \"unshuffled_deduplicated_no\", split=f\"train[:90%]\")\n    dataset[\"validation\"] = load_dataset(\"oscar\", \"unshuffled_deduplicated_no\", split=f\"train[90%:]\")\n\n    column_names = dataset[\"train\"].column_names\n    text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n\n    print(\"-----Creating config----\")\n\n    if not os.path.exists(f\"{output_dir}/config.json\"):\n        config = GPT2Config.from_pretrained(\"gpt2\", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, vocab_size=50257)\n        config.save_pretrained(output_dir)\n    else:\n        print(\"---Loading pretrained config\")\n        config = AutoConfig.from_pretrained(output_dir)\n\n\n\n\n    print(\"-------- Creating tokenizer --------\")\n\n    if not os.path.exists(f\"{output_dir}/tokenizer.json\"):\n\n        tokenizer = 
ByteLevelBPETokenizer()\n\n        def batch_iterator(batch_size=1000):\n            for i in range(0, len(dataset), batch_size):\n                yield dataset[\"train\"][i: i + batch_size][\"text\"]\n\n        # Customized training\n        tokenizer.train_from_iterator(batch_iterator(), vocab_size=50257, min_frequency=2, special_tokens=[\n            \"<s>\",\n            \"<pad>\",\n            \"</s>\",\n            \"<unk>\",\n            \"<mask>\",\n        ])\n\n        # Save files to disk\n        tokenizer.save(f\"./{output_dir}/tokenizer.json\")\n    else:\n        print(\"--Using cached tokenizer--\")\n\n    tokenizer = AutoTokenizer.from_pretrained(f\"./{output_dir}\")\n\n    print(\"-------- Tokenizing dataset --------\")\n\n    tok_logger = transformers.utils.logging.get_logger(\"transformers.tokenization_utils_base\")\n\n    \n\n    if not os.path.exists(\"cached_datasets/tokenized_dataset.pkl\"):\n\n        def tokenize_function(examples):\n            with CaptureLogger(tok_logger) as cl:\n                output = tokenizer(examples[text_column_name])\n            # clm input could be much much longer than block_size\n            if \"Token indices sequence length is longer than the\" in cl.out:\n                tok_logger.warning(\n                    \"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model.\"\n                )\n            return output\n\n        lm_datasets = dataset.map(\n            tokenize_function,\n            batched=True,\n            remove_columns=column_names,\n            load_from_cache_file=True,\n        )\n\n        with open(\"cached_datasets/tokenized_dataset.pkl\", \"wb\") as f:\n            pickle.dump(lm_datasets, f)\n    else:\n        print(\"tokenized dataset on path, loading tokenized dataset\")\n\n        with open(\"cached_datasets/tokenized_dataset.pkl\", \"rb\") as f:\n            lm_datasets = pickle.load(f)\n\n\n    print(f\"-------- grouping dataset with block size {block_size}--------\")\n\n    if not os.path.exists(\"cached_datasets/grouped_dataset.pkl\"):\n\n\n\n        def group_texts(examples):\n            # Concatenate all texts.\n            concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n            total_length = len(concatenated_examples[list(examples.keys())[0]])\n            # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n            # customize this part to your needs.\n            if total_length >= block_size:\n                total_length = (total_length // block_size) * block_size\n            # Split by chunks of max_len.\n            result = {\n                k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n                for k, t in concatenated_examples.items()\n            }\n            result[\"labels\"] = result[\"input_ids\"].copy()\n            return result\n\n\n        lm_datasets = lm_datasets.map(\n            group_texts,\n            batched=True,\n            num_proc=8,\n        )\n\n        with open(\"cached_datasets/grouped_dataset.pkl\", \"wb\") as f:\n            pickle.dump(lm_datasets, f)\n\n    else:\n        print(\"grouped dataset on path, loading grouped dataset\")\n\n        with open(\"cached_datasets/grouped_dataset.pkl\", \"rb\") as f:\n            lm_datasets = pickle.load(f)\n\n    train_dataset = lm_datasets[\"train\"]\n    eval_dataset = lm_datasets[\"validation\"]\n\n    has_tensorboard = is_tensorboard_available()\n    if has_tensorboard and jax.process_index() == 0:\n        try:\n            from flax.metrics.tensorboard import SummaryWriter\n            print(\"using SummaryWriter for logging\")\n            summary_writer = SummaryWriter(log_dir=Path(\"summary/\"))\n        except ImportError as ie:\n            has_tensorboard = False\n            logger.warning(\n                f\"Unable to display metrics through TensorBoard because some package are not installed: {ie}\"\n            )\n    else:\n        logger.warning(\n            \"Unable to display metrics through TensorBoard because the package is not installed: \"\n            \"Please run pip install tensorboard to enable.\"\n        )\n\n    print(\"--------setting up learning procedure--------\")\n\n\n    rng = 
jax.random.PRNGKey(SEED)\n rng, dropout_rng = jax.random.split(rng)\n\n num_epochs = int(num_train_epochs)\n train_batch_size = int(per_device_train_batch_size) * jax_devices\n eval_batch_size = int(per_device_eval_batch_size) * jax_devices\n steps_per_epoch = len(train_dataset) // train_batch_size\n total_train_steps = steps_per_epoch * num_epochs\n\n print(\"-----setting up learning rate scheduler-----\")\n\n linear_decay_lr_schedule_fn = create_learning_rate_fn(\n len(train_dataset),\n train_batch_size,\n num_train_epochs,\n warmup_steps,\n learning_rate,\n )\n\n def decay_mask_fn(params):\n flat_params = traverse_util.flatten_dict(params)\n flat_mask = {\n path: (path[-1] != \"bias\" and path[-2:] not in [(\"ln_1\", \"scale\"), (\"ln_2\", \"scale\"), (\"ln_f\", \"scale\")])\n for path in flat_params\n }\n return traverse_util.unflatten_dict(flat_mask)\n\n print(\"-----setting up optimizer-----\")\n\n optimizer = optax.adamw(\n learning_rate=linear_decay_lr_schedule_fn,\n b1=0.9,\n b2=0.98,\n eps= 1e-08,\n weight_decay=0.01,\n mask=decay_mask_fn,\n )\n\n\n print(\"---- Loading model-----\")\n\n model = FlaxAutoModelForCausalLM.from_config(config, seed=SEED, dtype=getattr(jnp, \"float32\"))\n\n print(\"-----creating train state-----\")\n\n state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer, dropout_rng=dropout_rng)\n\n def loss_fn(logits, labels):\n shift_logits = logits[..., :-1, :]\n shift_labels = labels[..., 1:]\n loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))\n return loss.mean()\n\n def train_step(state, batch):\n dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)\n\n def compute_loss(params):\n labels = batch.pop(\"labels\")\n logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]\n loss = loss_fn(logits, labels)\n return loss\n\n grad_fn = jax.value_and_grad(compute_loss)\n loss, grad = grad_fn(state.params)\n grad = jax.lax.pmean(grad, \"batch\")\n\n new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)\n\n metrics = {\"loss\": loss, \"learning_rate\": linear_decay_lr_schedule_fn(state.step)}\n metrics = jax.lax.pmean(metrics, axis_name=\"batch\")\n\n return new_state, metrics\n\n def eval_step(params, batch):\n labels = batch.pop(\"labels\")\n logits = model(**batch, params=params, train=False)[0]\n loss = loss_fn(logits, labels)\n\n metrics = {\"loss\": loss}\n metrics = jax.lax.pmean(metrics, axis_name=\"batch\")\n return metrics\n \n p_train_step = jax.pmap(train_step, \"batch\", donate_argnums=(0,))\n p_eval_step = jax.pmap(eval_step, \"batch\")\n\n state = state.replicate()\n\n\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {num_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel & distributed) = {train_batch_size}\")\n    logger.info(f\" Total optimization steps = {total_train_steps}\")\n\n\n    train_time = 0\n    train_metrics = []\n\n    epochs = tqdm(range(num_epochs), desc=\"Epoch ...\", position=0)\n\n    for epoch in epochs:\n\n        train_start = time.time() # Time of start of training\n\n        rng, input_rng = jax.random.split(rng)\n\n        train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)\n\n        steps_per_epoch = len(train_dataset) // train_batch_size\n\n        for step in tqdm(range(steps_per_epoch), desc=\"Training...\", position=1, leave=False):\n            batch = next(train_loader)\n            batch = shard(batch) # Creates on-accelerator prefetch buffer (not necessary on TPUs)\n\n            state, train_metric = p_train_step(state, batch)\n            logging.info(f\"Epoch {epoch}, Train step {step}\")\n            logging.info(train_metric)\n\n            train_metrics.append(train_metric)\n\n            cur_step = epoch * (len(train_dataset) // train_batch_size) + step\n\n            if cur_step % logging_steps == 0 and cur_step > 0:\n                train_metric = unreplicate(train_metric)\n                train_time += time.time() - train_start\n\n                if has_tensorboard and jax.process_index() == 0:\n                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)\n\n                epochs.write( f\"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})\" )\n\n                train_metrics = []\n\n            if cur_step % commit_step == 0 and cur_step > 0:\n                # save checkpoint after each epoch and push checkpoint to the hub\n                if jax.process_index() == 0:\n                    params = jax.device_get(unreplicate(state.params))\n                    model.save_pretrained(output_dir, params=params)\n                    tokenizer.save_pretrained(output_dir)\n\n                    commit_message = f\"Commit after epoch {epoch}, step {cur_step}\"\n\n                    repo.push_to_hub(commit_message=commit_message, blocking=False)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"navjordj/gpt2_no","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10855122181","text":"from collections import defaultdict as dd\n\nfrom utils import stream_lines\n\ndef is_lowpoint(lines, pt):\n    len1, len2 = len(lines), len(lines[0])\n    x,y = pt\n    non_diag_diffs = [(0,1), (0,-1), (-1,0), (1,0)]\n    non_diag_pts = [(x+a[0],y+a[1]) for a in non_diag_diffs]\n    #diffs = [(x+a,y+b) for a in (-1,0,1) for b in (-1,0,1) if not (a == 0 and b==0)]\n    for point in non_diag_pts:\n        if point[0] < 0 or point[0] >= len1:\n            continue\n        if point[1] < 0 or point[1] >= len2:\n            continue\n        \n        if lines[point[0]][point[1]] <= lines[x][y]:\n            return False\n    \n    return True\n\ndef floodfill(lines, low_pt):\n    len1, len2 = len(lines), len(lines[0])\n    \n    pts = [low_pt]\n\n    non_diag_diffs = [(0,1), (0,-1), (-1,0), (1,0)]\n\n    seen = set()\n    num_pts = 0\n    while pts:\n        cur_pt = pts.pop()\n        for pt in [(cur_pt[0]+a[0], cur_pt[1]+a[1]) for a in non_diag_diffs]:\n            if pt[0] < 0 or pt[0] >= len1:\n                continue\n            if pt[1] < 0 or pt[1] >= len2:\n                continue\n            if pt not in seen:\n                seen.add(pt)\n                if lines[pt[0]][pt[1]] != 9:\n                    pts.append(pt)\n                    num_pts += 1\n    \n    return num_pts\n\n\nfile = 'prob09.in'\nlines = [[int(x) for x in line] for line in stream_lines(file)]\nlen1, len2 = len(lines), len(lines[0])\nprint(len1,len2)\nm = [[False for y in range(len2)] for x in range(len1)]\nlow_pts = []\nfor x in range(len1):\n    for y in range(len2):\n        if is_lowpoint(lines, (x,y)):\n            low_pts.append((x,y))\n\nd: dict[tuple[int,int], int] = dict()\nfor pt in low_pts:\n    
d[pt] = floodfill(lines, pt)\n\n\nbigs = list(sorted([item for item in d.values()]))\nlast_three = bigs[-3:]\nval = 1\nfor item in last_three:\n    print(item)\n    val *= item\nprint(val)","repo_name":"Amfales/adventofcode2021","sub_path":"prob09/prob09b.py","file_name":"prob09b.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32098079142","text":"import types\n\n# SimpleNamespace stubs accept attribute assignment; a bare object() instance does not\nstore=types.SimpleNamespace()\ndejavu_store=store.dejavu_store=types.SimpleNamespace()\nrenpy=types.SimpleNamespace()\nrenpy.substitute=lambda text: text\nNARRATOR_NAME=\"SYSTEM\"\nONGOING_OUTCOME_NAME=\"ONGOING\"\nPLAYER_QUIT_OUTCOME_NAME=\"PLAYER_QUIT\"\nclass NoRollback:\n    pass\ndef Character(name,*args,**kwargs):\n    # color coding \n    def say(what,*args,**kwargs):\n        if name==NARRATOR_NAME:\n            print(\"\\033[90m\"+what+\"\\033[0m\")\n        else:\n            print(\"\\033[92m\"+name+\"\\033[0m\"+\": \"+\"\\033[94m\"+what+\"\\033[0m\")\n    return say\nnarrator=Character(NARRATOR_NAME)\ndef log_text(text):\n    print(\"\\033[90m\"+text+\"\\033[0m\")\ndef log_object(obj):\n    import json\n    print(\"\\033[90m\"+json.dumps(obj,indent=4)+\"\\033[0m\")\n\n\"\"\"renpy\ninit offset=-100\ninit python hide:\n\"\"\"\n\nimport requests\nimport json\nimport urllib3\nimport time\nfrom typing import Literal\n\ndef on_new_scenario():\n    assert dejavu_store.state=='disabled', \"You cannot start a new scenario inside the ai dialogue loop\"\n    dejavu_store.current={}\n    dejavu_store.state=\"disabled\"\n    dejavu_store.character_objects={}\n    dejavu_store.diary_references={}\ndejavu_store.on_new_scenario=on_new_scenario\n\ndef get_object(path):\n    p=dejavu_store.scenario_data\n    for key in path:\n        p=p[key]\n    return p\ndejavu_store.get_object=get_object\n\ndef set_state(state:'Literal[\"disabled\", \"opening_dialogue\",\"example_dialogue\",\"playing\"]'):\n    assert state in [\"disabled\", \"opening_dialogue\",\"example_dialogue\",\"playing\"]\n    dejavu_store.state=state\ndejavu_store.set_state=set_state\n\n\ndef write_dialogue(character_name,content,destination=None):\n    destination=destination or dejavu_store.get_object(dejavu_store.current['dialogue'])['content']\n    if character_name==NARRATOR_NAME:\n        destination.append({\n            'type':'narrate',\n            'content':content,\n        })\n    else:\n        destination.append({\n            'type':'dialogue',\n            'character':character_name,\n            'content':content,\n        })\ndejavu_store.write_dialogue=write_dialogue\n\ndef get_outcome_label(outcome_name):\n    return dejavu_store.scenario_data['outcomes'][outcome_name]['label']\ndejavu_store.get_outcome_label=get_outcome_label\n\ndef substitute(text):\n    return renpy.substitute(text)\n\nclass DejavuCharacter:\n    def __init__(self,name,is_player=False,*args,**kwargs):\n        self.name=name\n        self.is_player=is_player\n        if self.name == NARRATOR_NAME:\n            self.renpy_character=None\n        else:\n            self.renpy_character=Character(name,*args,**kwargs)\n    def __call__(self,what,slience=False,no_substitution=False,*args,**kwargs):\n        if not no_substitution: \n            # only substitute for example dialogue, and injected narrates\n            # do not substitute for player input and ai generated dialogue\n            what=substitute(what)\n        if dejavu_store.state==\"opening_dialogue\":\n            dejavu_store.write_dialogue(self.name,what)\n            if not slience:(self.renpy_character or narrator)(what,*args,**kwargs)\n        elif dejavu_store.state==\"example_dialogue\":\n            dejavu_store.write_dialogue(self.name,what)\n        elif dejavu_store.state==\"playing\":\n            dejavu_store.write_dialogue(self.name,what,destination=dejavu_store.history)\n            if not slience:(self.renpy_character or 
narrator)(what,*args,**kwargs)\ndejavu_store.DejavuCharacter=DejavuCharacter\n\nclass RollBackHistory(NoRollback):\n    def __init__(self):\n        self.history={}\n    def get(self,key,default=None):\n        if key in self.history:\n            return self.history[key]\n        else:\n            return default\n    def set(self,key,value):\n        self.history[key]=value\nclass RollBack:\n    def __init__(self):\n        self.history=RollBackHistory()\n        self.counter=-1\n    def get(self,default=None):\n        self.counter+=1\n        return self.history.get(self.counter,default=default)\n    def set(self,value):\n        self.history.set(self.counter,value)\ndejavu_store.RollBack=RollBack\n\n\n# ChatGPT API\n\n\ndejavu_store._api_key,dejavu_store._url=None,None\ndejavu_store._debug_print_request=False\ndejavu_store._debug_print_response=False\ndejavu_store._max_retry=5\ndejavu_store._retry_delay=10\n\ndef init_chatgpt_api(api_key,proxy=\"https://api.openai.com/v1/chat/completions\",debug_print_request=False,debug_print_response=False):\n    dejavu_store._api_key,dejavu_store._url=api_key,proxy\n    dejavu_store._debug_print_request,dejavu_store._debug_print_response=debug_print_request,debug_print_response\n\n\ndef completion(messages,temperature=1):\n    if dejavu_store._url is None: raise Exception(\"You must call init_chatgpt_api(api_key,url) before using the completion function.\")\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {dejavu_store._api_key}\"\n    }\n    data = {\n        \"model\": \"gpt-3.5-turbo-0613\",\n        # \"model\": \"gpt-4-0613\",\n        \"temperature\": temperature,\n        \"messages\": messages\n    }\n    if dejavu_store._debug_print_request: print(\"Request:\",messages)\n    completion=None\n    i_retry=0\n    while completion is None:\n        response=None\n        while response is None:\n            try:\n                response = requests.post(dejavu_store._url, headers=headers, data=json.dumps(data))\n            except urllib3.exceptions.MaxRetryError:\n                print(\"MaxRetryError, retrying in 5 seconds...\")\n                time.sleep(5)\n        if response is None:\n            print(\"No response, retrying in 5 seconds...\")\n            time.sleep(dejavu_store._retry_delay)\n        if response.status_code == 200:\n            completion = response.json()[\"choices\"][0][\"message\"]\n            messages.append(completion)\n            if dejavu_store._debug_print_response: print(\"Response:\",completion)\n            return messages \n        else:\n            if dejavu_store._debug_print_response: print(f\"Error: {response.status_code}, {response.text}\")\n            # raise Exception(f\"Error: {response.status_code}, {response.text}\")\n            print(f\"Error: {response.status_code}, {response.text}\")\n            i_retry+=1\n            if i_retry>=dejavu_store._max_retry:\n                raise Exception(f\"Error: {response.status_code}, {response.text}\")\n            time.sleep(dejavu_store._retry_delay)\n    \ndef purify_label(prediction:str,labels:\"list[str]\",default:str=\"None\",search_from:Literal[\"first\",\"last\"]=\"last\")->str:\n    if default is None: raise Exception(\"You must specify a default value.\")\n    # find the label which appears first/last in the prediction string\n    best_label=default\n    best_index=-1\n    for label in labels:\n        # find the index of the label in the prediction string\n        index=prediction.rfind(label)\n        if index!=-1:\n            if best_index==-1 or (\n                search_from==\"last\" and index>best_index\n            ) or (\n                search_from==\"first\" and index<best_index\n            ):\n                best_label=label\n                best_index=index\n    return best_label\n\ndef completion_with_log(name,query,*args,**kwargs):\n        if dejavu_store.log_level>0:\n            log_text(\"performing {name} query...\".format(name=name))\n        if dejavu_store.log_level>1:\n            log_text(\"query:\")\n            log_object(query)\n        response=completion(query,*args,**kwargs)\n        if dejavu_store.log_level>0:\n            log_text(\"{name} query responded\".format(name=name))\n        if dejavu_store.log_level>1:\n            
log_text(\"response:\")\n log_object(response[-1])\n return response\n\ndef perform_roleplay_query(character_name,scenario,history):\n request=compose_roleplay_request(character_name,scenario,history)\n response=completion_with_log(\"perform_roleplay_query\",request,temperature=0.5)\n return response[-1][\"content\"]\n\ndef perform_check_outcome_query(scenario,history,removed_incidents=[]):\n if len(scenario[\"outcomes\"])-len(removed_incidents)<=0:\n return ONGOING_OUTCOME_NAME, \"ongoing\", \"No outcome defined.\"\n request=compose_check_outcome_request(scenario,history,remove_incidents=removed_incidents)\n response=completion_with_log(\"check_outcome\",request,temperature=0)\n response_text=response[-1][\"content\"]\n if dejavu_store.log_level>=1:\n log_text(response_text)\n target_labels=list(scenario['outcomes'].keys())+[ONGOING_OUTCOME_NAME]\n outcome_name=purify_label(response_text,target_labels,default=ONGOING_OUTCOME_NAME)\n if outcome_name==ONGOING_OUTCOME_NAME:\n outcome_type=\"ongoing\"\n else:\n outcome_type=scenario[\"outcomes\"][outcome_name][\"type\"]\n return outcome_name, outcome_type, response_text\n\ndef perform_summary_query(character_name,scenario,history):\n request=compose_summary_request(character_name,scenario,history)\n response=completion_with_log(\"summary\",request,temperature=0)\n summary=response[-1][\"content\"]\n summary=summary.replace(\"\\n\",\" \")\n return summary\n\ndejavu_store.init_chatgpt_api=init_chatgpt_api\ndejavu_store.perform_roleplay_query=perform_roleplay_query\ndejavu_store.perform_check_outcome_query=perform_check_outcome_query\ndejavu_store.perform_summary_query=perform_summary_query\n\ndejavu_store.init_chatgpt_api(api_key=open(\"C:\\\\openai.txt\").read(), proxy=\"https://api.openai.com/v1/chat/completions\")\n\n","repo_name":"fangzhangmnm/Dejavu-Adventurers","sub_path":".old/dejavu_python_ren.py","file_name":"dejavu_python_ren.py","file_ext":"py","file_size_in_byte":14815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"10583262856","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport pickle\nfrom datetime import datetime\n\n\ndef plot_sim(status, basename=None, save=True, title=None, ylimit=None):\n\n # plt.figure()\n infl_array = np.asarray(status.h_infl)\n seg_array = np.asarray(status.h_seg)\n x_array = np.arange(infl_array.size)\n plt.plot(x_array, infl_array, label=\"Infected\", marker='o')\n plt.plot(x_array, seg_array, label=\"Removed\", marker='v')\n plt.legend()\n\n if title is not None:\n plt.title(title)\n\n if ylimit is not None:\n plt.ylim(ylimit)\n\n plt.xlabel('Day')\n plt.ylabel('Population')\n\n if basename is None:\n basename = datetime.now().strftime('%Y%m%d%H%M')\n\n filename = '{}.png'.format(basename)\n\n if save:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.0)\n else:\n plt.show()\n\n\ndef save_status(status, basename=None):\n\n if basename is None:\n basename = datetime.now().strftime('%Y%m%d%H%M')\n\n filename = '{}.pickle'.format(basename)\n\n with open(filename, 'wb') as f:\n pickle.dump(status, f)\n\n\ndef load_status(basename):\n\n filename = '{}.pickle'.format(basename)\n\n with open(filename, 'rb') as f:\n status = pickle.load(f)\n return status\n\n\ndef save_as_csv(status, basename):\n infl_array = np.asarray(status.h_infl)\n seg_array = np.asarray(status.h_seg)\n filename = '{}.csv'.format(basename)\n\n stacked = np.stack([infl_array, seg_array])\n np.savetxt(filename,\n stacked.T,\n delimiter=',',\n fmt='%d',\n 
header='infl,segregated')\n\n\ndef plot_sims(status_list,\n infected=True,\n segregated=True,\n filename=None,\n save=True,\n title=None,\n ylimit=None,\n xlimit=None):\n\n # plt.figure()\n\n for status, labels, marks in status_list:\n infl_array = np.asarray(status.h_infl)\n seg_array = np.asarray(status.h_seg)\n x_array = np.arange(infl_array.size)\n if infected:\n plt.plot(x_array, infl_array, label=labels[0], marker=marks[0])\n if segregated:\n plt.plot(x_array, seg_array, label=labels[1], marker=marks[1])\n\n plt.legend()\n\n if title is not None:\n plt.title(title)\n\n if ylimit is not None:\n plt.ylim(ylimit)\n\n if xlimit is not None:\n plt.xlim(xlimit)\n\n plt.xlabel('Day')\n plt.ylabel('Population')\n if infected and not segregated:\n plt.ylabel('Infected Population')\n if not infected and segregated:\n plt.ylabel('Removed Population')\n\n if filename is None:\n filename = datetime.now().strftime('%Y%m%d%H%M')\n\n filename = '{}.png'.format(filename)\n\n if save:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.0)\n else:\n plt.show()\n","repo_name":"RoloAfrole/sim-covid19","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5032759897","text":"\"\"\"\nFeature analysis.\n\n@author: Soufiane Mourragui\n\nThis modules contains all the codes used in the Taylor expansion for the Gaussian/Matern\nkernel.\n\"\"\"\n\nimport gc\nimport logging\nfrom functools import reduce\nfrom itertools import combinations_with_replacement\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom joblib import Parallel, delayed\n\n\ndef higher_order_contribution(\n d: int,\n data: np.array,\n sample_offset: np.array,\n gene_names: list,\n gamma: float,\n n_jobs: int = 1,\n return_matrix: bool = False,\n):\n r\"\"\"Compute the features corresponding to the Taylor expansion of the kernel.\n\n Compute the features corresponding to the Taylor expansion of the kernel, i.e. $x_j exp^{-\\gamma xx^T}$ for\n linear features. Returns a sparse pandas DataFrame containing all the features (columns) by samples (rows).\n We here critically rely on the sparsity of the data-matrix to speed up computations. The current implementation\n is relevant in two cases:\n -When dimensionality is small\n -When data is sparse.\n\n High-dimensional and dense data matrices would lead to a significant over-head without computational gains,\n and could benefit from another implementation strategy.\n\n Parameters\n ----------\n d: int\n Order of the features to compute, e.g. 1 for linear, 2 for interaction terms.\n\n data: np.array\n Data to compute features on, samples in the rows and genes (features) in the columns.\n\n sample_offset: np.array\n Offset of each sample from data.\n\n gene_names: list\n Names of each columns in data ; corresponds to features naming.\n\n gamma: float\n Value of the gamma parameter for Matérn kernel.\n\n n_jobs: int, default to 1\n Number of concurrent threads to use. -1 will use all CPU cores possible.\n WARNING: for d >= 2 and a large number of genes, the routine can be memory-intensive and a high n_jobs\n could lead to crash.\n\n return_matrix: bool, default to False\n If True, then returns simply the feature-matrix without feature-naming. In cases when feature names\n are not relevant (e.g. 
computing the proportion of non-linearities), return_matrix=True can help\n speed-up the process.\n\n Returns\n -------\n pd.DataFrame\n Sparse dataframe with samples in the rows and named features in the columns.\n For instance, when d=1, returns each column of data scaled by RKHS normalisation factor and multiplied\n by offset value.\n \"\"\"\n # Exploits sparsity of scRNA-seq data (even more handy when d >= 2)\n # Note to future user: this can be an issue if data is not sparse\n sparse_data = scipy.sparse.csc_matrix(data)\n\n # Compute features by iterating over possible combinations\n logging.info(\"\\t START FEATURES\")\n combinations_features = Parallel(n_jobs=n_jobs, verbose=1, max_nbytes=1e6, pre_dispatch=int(1.5 * n_jobs))(\n delayed(combinatorial_product)(sparse_data, x, gamma)\n for x in combinations_with_replacement(np.arange(sparse_data.shape[1]), r=d)\n )\n gc.collect()\n\n # Combine features and multiply columns by offset.\n logging.info(\"\\t START CONCATENATION\")\n logging.info(\"\\t\\t START STACKING\")\n combinations_features = scipy.sparse.hstack(combinations_features, format=\"csc\")\n logging.info(\"\\t\\t START PRODUCT\")\n combinations_features = scipy.sparse.diags(sample_offset).dot(combinations_features)\n gc.collect()\n if return_matrix:\n return combinations_features\n\n # Return names of each features.\n logging.info(\"\\t\\t FIND NAMES\")\n combinations_names = Parallel(\n n_jobs=min(5, n_jobs), verbose=1, max_nbytes=1e4, pre_dispatch=int(1.5 * min(5, n_jobs))\n )(delayed(_interaction_name)(x) for x in combinations_with_replacement(gene_names, r=d))\n\n return pd.DataFrame.sparse.from_spmatrix(data=combinations_features, columns=combinations_names)\n\n\ndef _combination_to_idx(idx, p):\n r\"\"\"Transform a combination (tuple of feature idx) into an indicative function.\n\n Parameters\n ----------\n idx: tuple\n Combination of features in the form of a tuple.
\n E.g. for 6 genes, (5,1) corresponds to the product of 1 and 5 and returns\n (0,1,0,0,0,1), while (1,2,3,2) will yield (0,1,2,1,0,0).
\n WARNING: start at 0.\n\n p: int\n Number of genes (features) in the dataset.\n\n Returns\n -------\n np.array\n Indicative function of the combination\n \"\"\"\n return np.array([np.sum(np.array(idx) == i) for i in range(p)])\n\n\ndef basis(x, k, gamma):\n r\"\"\"Compute the basis function for a single gene, except offset term.\n\n Parameters\n ----------\n x: np.array\n Column vector (each row corresponds to a sample).\n\n k: int\n Order to compute.\n\n gamma: float\n Parameter of Matérn kernel.\n\n Returns\n -------\n np.array\n Value of the higher order feature.\n \"\"\"\n if k == 0:\n return np.ones(x.shape[0])\n\n product = x\n for _ in range(1, k):\n product = x.multiply(product)\n coef = np.power(2 * gamma, k / 2) / np.sqrt(scipy.special.factorial(k))\n\n return coef * product\n\n\ndef combinatorial_product(x, idx, gamma):\n \"\"\"\n Compute the basis function for a single gene, except offset term.\n\n Parameters\n ----------\n x: np.array\n Data matrix with samples in the rows and genes in the columns\n\n idx: tuple\n Combinations, i.e. tuple of features to take into account.\n\n gamma: float\n Parameter of Matérn kernel.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n Values of the higher order feature.\n \"\"\"\n # Iterate over all genes and compute the feature weight by multiplication\n prod = [basis(x[:, i], k, gamma) for i, k in enumerate(_combination_to_idx(idx, x.shape[1])) if k > 0]\n if len(prod) == 0:\n return 1\n\n return reduce(scipy.sparse.csc_matrix.multiply, prod)\n\n\ndef _interaction_name(gene_combi):\n combin_name = [f\"{g}^{r}\" for g, r in zip(*np.unique(gene_combi, return_counts=True))]\n return \"*\".join(combin_name) if len(combin_name) > 0 else \"1\"\n\n\ndef _higher_order_interaction_wrapper(data, x, gamma, gene_names):\n return [combinatorial_product(data, x, gamma), _interaction_name(gene_names, _combination_to_idx(x, data.shape[1]))]\n\n\ndef _compute_offset(data, gamma):\n r\"\"\"Compute the sample-level offset values, i.e. 
$\\exp -\\gamma xx^T$.\n\n Parameters\n ----------\n data: np.array\n Data to compute features on, samples in the rows and genes (features) in the columns.\n\n gamma: float\n Value of the gamma parameter for Matérn kernel.\n\n Returns\n -------\n np.array\n One-dimensional vector with offset values of all samples.\n \"\"\"\n sample_offset = np.linalg.norm(data, axis=1)\n return np.exp(-gamma * np.power(sample_offset, 2))\n","repo_name":"NKI-CCB/sobolev_alignment","sub_path":"sobolev_alignment/feature_analysis.py","file_name":"feature_analysis.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"78207456","text":"import sys\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QApplication, QMainWindow, QWidget, QLabel, QLineEdit, QPushButton, QStackedWidget, QGridLayout, QMenuBar\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QPainter, QColor, QBrush, QPen\n\nclass CircleWidget(QWidget):\n def __init__(self):\n super().__init__()\n self.stage = 1\n self.total_stages = 4\n self.stages_arr = [\"start\", \"Parachute release\", \"Cansat deploy\", \"tether released\"]\n self.altitude = 300 # Set the initial altitude value\n self.setStyleSheet(\"\"\"\n background-color: white;\n \"\"\")\n\n def paintEvent(self, event):\n painter = QPainter(self)\n radius = 40 # Circle radius\n spacing = 110 # Spacing between circles\n x_offset = 50 # Horizontal offset\n\n for stage in range(self.total_stages):\n x = x_offset + (stage * (2 * radius + spacing))\n y = 50\n\n # Determine the circle color based on altitude\n color = self.getStageColor(stage)\n\n painter.setPen(Qt.black)\n painter.setBrush(QBrush(color))\n painter.drawEllipse(x, y, radius * 2, radius * 2)\n\n # Draw connecting lines\n \n x_prev = x_offset + ((stage - 1) * (2 * radius + spacing))\n y_prev = y + radius\n painter.setPen(QPen(Qt.black, 3))\n painter.drawLine(x_prev + 2 * radius, y_prev, x, y + radius)\n\n # Draw stage numbers and labels\n painter.setFont(QFont(\"Arial\", 12))\n painter.setPen(QColor(0, 0, 0))\n painter.drawText(x + radius - 10, y + radius + 5, str(stage + 1))\n label = f\"{self.stages_arr[stage]}\"\n label_width = painter.fontMetrics().width(label)\n painter.drawText(int(x + (radius - label_width) / 2), y - 20, label)\n\n def getStageColor(self, stage):\n # Determine circle color based on altitude\n if self.altitude == 0 and stage == 0:\n return Qt.yellow\n elif self.altitude >= 300 and stage < 2:\n return Qt.green\n elif self.altitude >= 450 and stage < 3:\n return Qt.green\n elif self.altitude >= 750:\n return Qt.green\n else:\n return Qt.yellow","repo_name":"Bhoomika156/Vishwa_GUI","sub_path":"statusBar.py","file_name":"statusBar.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13609967320","text":"#coding:utf-8\nimport sys\nimport sunau\nimport numpy as np\nimport scipy.fftpack\nimport matplotlib\nmatplotlib.use('WXAgg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n#ファイル\nwf = sunau.open(\"blues.00000.au\" , \"r\" )\nfs = wf.getframerate() # サンプリング周波数\nx = wf.readframes(wf.getnframes())\nx = np.frombuffer(x, dtype= \"int16\") / 32768.0 # -1 - +1に正規化\nwf.close()\n\nfig = plt.figure()\nax = Axes3D(fig)\n\n#start = 0 # サンプリングする開始位置\nN = 512 # FFTのサンプル数\nSHIFT = 128 # 窓関数をずらすサンプル数\nstep = 20 #サンプル総数\n\nhammingWindow = np.hamming(N)\nfreqList = np.fft.fftfreq(N, 
d=1.0/fs) # 周波数軸の値を計算\n\n#グラフ\n#plx = np.arange(0, freqList, 1)\nply = np.arange(0, step, 1)\nplX, plY = np.meshgrid(freqList, ply)\nplZ = np.empty((0,freqList.size),float)\n\ni = 0\nwhile i < step :\n start = i * SHIFT\n windowedData = hammingWindow * x[start:start+N] # 切り出した波形データ(窓関数あり)\n X = np.fft.fft(windowedData) # FFT\n amplitudeSpectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in X] # 振幅スペクトル\n #配列の追加\n plZ_tmp = np.vstack((plZ, amplitudeSpectrum))\n plZ = plZ_tmp\n i += 1\n\nax.plot3D(np.ravel(plX),np.ravel(plY),np.ravel(plZ))\nplt.show()\n","repo_name":"ChiSenSan/workspace","sub_path":"Python/sfft_plot3d.py","file_name":"sfft_plot3d.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"86557382963","text":"from pathlib import Path\nimport re\n\np = Path('/Users/rbryan/PycharmProjects/ATBSChapter9/regexFiles/')\ntextFiles = list(p.glob('*.txt'))\nprint(textFiles)\nuserRegex = input('Enter your desired RegEx:\\n') # Ask user for regex to search for\ncompiledRegex = re.compile(userRegex) # Compile user's regex\nmatchedRegex = {}\n\nfor currentFile in range(len(textFiles)):\n openedFile = open(textFiles[currentFile])\n fileContent = openedFile.read()\n matchedRegex[textFiles[currentFile]] = (compiledRegex.findall(fileContent))\n openedFile.close()\nfor i in matchedRegex.keys():\n print(matchedRegex[i])\n","repo_name":"pirbpi/ATBS","sub_path":"regexSearch.py","file_name":"regexSearch.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72131396624","text":"import pytest\nfrom swap_meet.vendor import Vendor\nfrom swap_meet.item import Item\n\n# @pytest.mark.skip\ndef test_items_have_blank_default_category():\n item = Item()\n assert item.category == \"\"\n\n# @pytest.mark.skip\ndef test_get_items_by_category():\n item_a = Item(category=\"clothing\")\n item_b = Item(category=\"electronics\")\n item_c = Item(category=\"clothing\")\n vendor = Vendor(\n inventory=[item_a, item_b, item_c]\n )\n\n items = vendor.get_by_category(\"clothing\")\n\n assert len(items) == 2\n assert item_a in items\n assert item_c in items\n assert item_b not in items\n\n# @pytest.mark.skip\ndef test_get_no_matching_items_by_category():\n item_a = Item(category=\"clothing\")\n item_b = Item(category=\"clothing\")\n item_c = Item(category=\"clothing\")\n vendor = Vendor(\n inventory=[item_a, item_b, item_c]\n )\n\n items = vendor.get_by_category(\"electronics\")\n\n # raise Exception(\"Complete this test according to comments below.\")\n # *********************************************************************\n # ****** Complete Assert Portion of this test **********\n # *********************************************************************\n \n # more pythonic than assert len(items) == 0\n assert not items\n","repo_name":"vuongthu/swap-meet","sub_path":"tests/unit_tests/test_wave_02.py","file_name":"test_wave_02.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25637952621","text":"\n\nimport json\nfrom datetime import datetime\nfrom decimal import Decimal\n\nimport ddt\nimport mock\nimport pytz\nimport responses\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.test import RequestFactory, override_settings\nfrom django.urls import reverse\nfrom 
opaque_keys.edx.keys import CourseKey\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.test import factories\nfrom rest_framework import status\nfrom waffle.testutils import override_flag\n\nfrom ecommerce.coupons.tests.mixins import DiscoveryMockMixin\nfrom ecommerce.courses.tests.factories import CourseFactory\nfrom ecommerce.entitlements.utils import create_or_update_course_entitlement\nfrom ecommerce.extensions.api.serializers import OrderSerializer\nfrom ecommerce.extensions.api.tests.test_authentication import AccessTokenMixin\nfrom ecommerce.extensions.api.v2.constants import ENABLE_HOIST_ORDER_HISTORY\nfrom ecommerce.extensions.api.v2.tests.views import OrderDetailViewTestMixin\nfrom ecommerce.extensions.checkout.exceptions import BasketNotFreeError\nfrom ecommerce.extensions.checkout.views import ReceiptResponseView\nfrom ecommerce.extensions.fulfillment.signals import SHIPPING_EVENT_NAME\nfrom ecommerce.extensions.fulfillment.status import LINE, ORDER\nfrom ecommerce.extensions.test.factories import create_order, prepare_voucher\nfrom ecommerce.tests.factories import SiteConfigurationFactory\nfrom ecommerce.tests.mixins import Applicator, ThrottlingMixin\nfrom ecommerce.tests.testcases import TestCase\n\nBasket = get_model('basket', 'Basket')\nBenefit = get_model('offer', 'Benefit')\nOrder = get_model('order', 'Order')\nProduct = get_model('catalogue', 'Product')\nShippingEventType = get_model('order', 'ShippingEventType')\npost_checkout = get_class('checkout.signals', 'post_checkout')\nUser = get_user_model()\n\n\n@ddt.ddt\nclass OrderListViewTests(AccessTokenMixin, ThrottlingMixin, TestCase):\n def setUp(self):\n super(OrderListViewTests, self).setUp()\n self.path = reverse('api:v2:order-list')\n self.user = self.create_user()\n self.token = self.generate_jwt_token_header(self.user)\n\n def test_not_authenticated(self):\n \"\"\" If the user is not authenticated, the view should return HTTP status 401. \"\"\"\n response = self.client.get(self.path)\n self.assertEqual(response.status_code, 401)\n\n def assert_empty_result_response(self, response):\n \"\"\" Verifies that the view responded successfully with an empty result list. \"\"\"\n self.assertEqual(response.status_code, 200)\n\n content = response.json()\n self.assertEqual(content['count'], 0)\n self.assertEqual(content['results'], [])\n\n @responses.activate\n def test_oauth2_authentication(self):\n \"\"\"Verify clients can authenticate with OAuth 2.0.\"\"\"\n auth_header = 'Bearer {}'.format(self.DEFAULT_TOKEN)\n\n self.mock_user_info_response(username=self.user.username)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=auth_header)\n self.assert_empty_result_response(response)\n\n def test_no_orders(self):\n \"\"\" If the user has no orders, the view should return an empty list. 
\"\"\"\n self.assertFalse(self.user.orders.exists())\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assert_empty_result_response(response)\n\n def test_with_orders(self):\n \"\"\"\n The view should return a list of the user's orders, sorted reverse chronologically,\n filtered by current site's partner.\n \"\"\"\n order = create_order(site=self.site, user=self.user)\n site = SiteConfigurationFactory().site\n create_order(site=site, user=self.user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(Order.objects.count(), 2)\n self.assertEqual(content['count'], 1)\n self.assertEqual(content['results'][0]['number'], str(order.number))\n\n # Test ordering\n order_2 = create_order(site=self.site, user=self.user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(content['count'], 2)\n self.assertEqual(content['results'][0]['number'], str(order_2.number))\n self.assertEqual(content['results'][1]['number'], str(order.number))\n\n @ddt.data(True, False)\n def test_enable_hoist_order_history(self, enable_hoist_order_history_flag):\n \"\"\" Verify that orders contain the Order History flag value \"\"\"\n with override_flag(ENABLE_HOIST_ORDER_HISTORY, active=enable_hoist_order_history_flag):\n create_order(site=self.site, user=self.user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(content['results'][0]['enable_hoist_order_history'], enable_hoist_order_history_flag)\n\n @ddt.data(\n # certificate_type, has_discount, percent_benefit, credit_provider, credit_hours, create_enrollment_code, sku\n ('credit', False, 0, 'Harvard', 1, False, '123'),\n ('credit', True, 15, 'Harvard', 1, False, '456'),\n ('verified', True, 15, None, 0, False, '789'),\n ('audit', False, 0, None, 0, False, '124'),\n )\n @ddt.unpack\n @mock.patch('ecommerce.extensions.checkout.views.ReceiptResponseView.get_enterprise_learner_portal_url')\n @mock.patch('ecommerce.extensions.checkout.views.ReceiptResponseView.get_metadata_for_enterprise_user')\n def test_orders_api_attributes_for_receipt_mfe(\n self, certificate_type, has_discount, percent_benefit,\n credit_provider, credit_hours, create_enrollment_code, sku,\n mock_get_metadata_for_enterprise_user, mock_get_enterprise_learner_portal_url,\n ):\n \"\"\"\n Verify that orders have the values added in the Orders API serializer\n to be utilized in the receipt page in ecommerce MFE.\n \"\"\"\n test_learner_portal_url = 'http://fake-learner-portal-url.org'\n mock_get_metadata_for_enterprise_user.return_value = {\n 'id': 1,\n 'active': True,\n 'enterprise_customer': {'slug': 'fake-enterprise'},\n }\n mock_get_enterprise_learner_portal_url.return_value = test_learner_portal_url\n price = 100.00\n currency = 'USD'\n course_id = 'a/b/c'\n course = CourseFactory(id=course_id, name='Test Course', partner=self.partner)\n product = factories.ProductFactory(\n categories=[],\n stockrecords__price_excl_tax=price,\n stockrecords__price_currency=currency\n )\n basket = factories.BasketFactory(owner=self.user, site=self.site)\n product = course.create_or_update_seat(\n certificate_type,\n True,\n price,\n credit_provider=credit_provider,\n 
credit_hours=credit_hours,\n create_enrollment_code=create_enrollment_code,\n sku=sku,\n )\n\n if has_discount:\n voucher, product = prepare_voucher(\n _range=factories.RangeFactory(products=[product]),\n benefit_value=percent_benefit,\n benefit_type=Benefit.PERCENTAGE\n )\n basket.vouchers.add(voucher)\n\n basket.add_product(product)\n Applicator().apply(basket, user=basket.owner, request=self.request)\n order = factories.create_order(basket=basket, user=self.user)\n\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n\n content = json.loads(response.content.decode('utf-8'))\n payment_method = ReceiptResponseView().get_payment_method(order)\n\n for line in order.lines.all():\n # Test for: is_enrollment_code_product\n self.assertEqual(create_enrollment_code, line.product.is_enrollment_code_product)\n\n # Test for: credit_provider in attr\n self.assertEqual(getattr(line.product.attr, 'credit_provider', None), credit_provider)\n\n # Test for: contains_credit_seat\n self.assertIn('contains_credit_seat', content['results'][0])\n if credit_provider:\n self.assertEqual(content['results'][0]['contains_credit_seat'], True)\n\n # Test for: basket_discounts\n self.assertIn('basket_discounts', content['results'][0])\n if has_discount:\n self.assertEqual(\n float(percent_benefit),\n content['results'][0]['basket_discounts'][0]['benefit_value']\n )\n self.assertEqual(\n currency,\n content['results'][0]['basket_discounts'][0]['currency']\n )\n\n # Test for: payment_method\n self.assertEqual(payment_method, content['results'][0]['payment_method'])\n\n # Test for: discount\n if has_discount:\n self.assertEqual(float(percent_benefit), float(content['results'][0]['discount']))\n else:\n self.assertEqual('0', content['results'][0]['discount'])\n\n # Test for: total_before_discounts_incl_tax\n self.assertEqual(float(price), float(content['results'][0]['total_before_discounts_incl_tax']))\n\n # Test for: dashboard_url\n self.assertIn('dashboard_url', content['results'][0])\n\n # Test for: enterprise_customer\n self.assertIn('enterprise_learner_portal_url', content['results'][0])\n if has_discount:\n self.assertEqual(\n test_learner_portal_url,\n content['results'][0]['enterprise_learner_portal_url']\n )\n\n # Test for: order_product_ids\n self.assertIn('order_product_ids', content['results'][0])\n expected_order_product_ids = ','.join(map(str, order.lines.values_list('product_id', flat=True)))\n self.assertEqual(expected_order_product_ids, content['results'][0]['order_product_ids'])\n\n # Test for: product_tracking\n with self.settings(AWIN_ADVERTISER_ID=1234):\n self.assertTrue(content['results'][0]['product_tracking'])\n\n # Test for: course_organization\n self.assertIn('course_organization', content['results'][0]['lines'][0])\n self.assertEqual(CourseKey.from_string(course_id).org, content['results'][0]['lines'][0]['course_organization'])\n\n def test_with_other_users_orders(self):\n \"\"\" The view should only return orders for the authenticated users. 
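Orders placed by a different user must not appear in the response at all. 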
\"\"\"\n other_user = self.create_user()\n create_order(site=self.site, user=other_user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assert_empty_result_response(response)\n\n order = create_order(site=self.site, user=self.user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content['count'], 1)\n self.assertEqual(content['results'][0]['number'], str(order.number))\n\n @ddt.unpack\n @ddt.data(\n (True, True),\n (True, False),\n )\n def test_staff_superuser(self, is_staff, is_superuser):\n \"\"\" The view should return all orders for when authenticating as a staff member or superuser. \"\"\"\n admin_user = self.create_user(is_staff=is_staff, is_superuser=is_superuser)\n order = create_order(site=self.site, user=self.user)\n\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.generate_jwt_token_header(admin_user))\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content['count'], 1)\n self.assertEqual(content['results'][0]['number'], str(order.number))\n\n def test_user_information(self):\n \"\"\" Make sure that the correct user information is returned. \"\"\"\n admin_user = self.create_user(is_staff=True, is_superuser=True)\n order = create_order(site=self.site, user=admin_user)\n\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.generate_jwt_token_header(admin_user))\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content['count'], 1)\n self.assertEqual(content['results'][0]['number'], str(order.number))\n self.assertEqual(content['results'][0]['user']['email'], admin_user.email)\n self.assertEqual(content['results'][0]['user']['username'], admin_user.username)\n\n def test_username_filter_with_staff(self):\n \"\"\" Verify the staff user can filter data by username.\"\"\"\n\n # create two orders for different users\n order = create_order(site=self.site, user=self.user)\n other_user = self.create_user()\n other_order = create_order(site=self.site, user=other_user)\n\n requester = self.create_user(is_staff=True)\n self.client.login(email=requester.email, password=self.password)\n\n self.assert_list_with_username_filter(self.user, order)\n self.assert_list_with_username_filter(other_user, other_order)\n\n def test_username_filter_with_non_staff(self):\n \"\"\"Non staff users are not allowed to filter on any other username.\"\"\"\n requester = self.create_user(is_staff=False)\n self.client.login(username=requester.username, password=self.password)\n\n response = self.client.get(self.path, {'username': self.user.username})\n self.assertEqual(response.status_code, 403)\n\n def assert_list_with_username_filter(self, user, order):\n \"\"\" Helper method for making assertions. 
\"\"\"\n\n response = self.client.get(self.path, {'username': user.username})\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(\n response.data['results'][0],\n OrderSerializer(order, context={'request': RequestFactory(SERVER_NAME=self.site.domain).get('/')}).data\n )\n\n def test_orders_with_multiple_sites(self):\n \"\"\"\n The view should return a list of the user's orders for multiple sites against same partner.\n \"\"\"\n order = create_order(site=self.site, user=self.user)\n second_order = create_order(site=self.site, user=self.user)\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(Order.objects.count(), 2)\n self.assertEqual(content['count'], 2)\n self.assertEqual(content['results'][0]['number'], str(second_order.number))\n self.assertEqual(content['results'][1]['number'], str(order.number))\n\n # Configure new site for same partner.\n domain = 'testserver.fake.internal'\n site_configuration = SiteConfigurationFactory(\n from_email='from@example.com',\n oauth_settings={\n 'SOCIAL_AUTH_EDX_OAUTH2_KEY': 'key',\n 'SOCIAL_AUTH_EDX_OAUTH2_SECRET': 'secret'\n },\n partner=self.partner,\n segment_key='fake_segment_key',\n site__domain=domain,\n base_cookie_domain=domain,\n )\n\n self.request.site = site_configuration.site\n self.client = self.client_class(SERVER_NAME=domain)\n\n response = self.client.get(self.path, HTTP_AUTHORIZATION=self.token)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(content['count'], 2)\n self.assertEqual(content['results'][0]['number'], str(second_order.number))\n self.assertEqual(content['results'][1]['number'], str(order.number))\n\n\n@ddt.ddt\n@override_settings(ECOMMERCE_SERVICE_WORKER_USERNAME='test-service-user')\nclass OrderFulfillViewTests(TestCase):\n def setUp(self):\n super(OrderFulfillViewTests, self).setUp()\n ShippingEventType.objects.get_or_create(name=SHIPPING_EVENT_NAME)\n\n # Use the ecommerce worker service user in order to cover\n # request throttling code in extensions/api/throttles.py\n self.user = self.create_user(is_staff=True, username='test-service-user')\n self.change_order_permission = Permission.objects.get(codename='change_order')\n self.user.user_permissions.add(self.change_order_permission)\n\n self.client.login(username=self.user.username, password=self.password)\n\n self.order = create_order(site=self.site, user=self.user)\n self.url = reverse('api:v2:order-fulfill', kwargs={'number': self.order.number})\n\n def _put_to_view(self):\n \"\"\"\n PUT to the view being tested.\n\n Returns:\n Response\n \"\"\"\n return self.client.put(self.url)\n\n def _assert_fulfillment_success(self):\n \"\"\"Verify that order fulfillment was successful. 
The view should return HTTP 200.\"\"\"\n with mock.patch('ecommerce.extensions.order.processing.EventHandler.handle_shipping_event') as mocked:\n def handle_shipping_event(order, _event_type, _lines, _line_quantities, **_kwargs):\n order.status = ORDER.COMPLETE\n order.save()\n return order\n\n mocked.side_effect = handle_shipping_event\n response = self._put_to_view()\n\n self.assertTrue(mocked.called)\n self.assertEqual(200, response.status_code)\n\n return response\n\n @ddt.data('delete', 'get', 'post')\n def test_delete_get_post_prohibited(self, method):\n \"\"\"Verify that the view does not allow DELETE, GET, or POST.\"\"\"\n response = getattr(self.client, method)(self.url)\n\n # TODO: Since the view is routed to PUT and PATCH, DELETE, GET, and\n # POST *should* all be met with 405. However, permissions checks appear\n # to occur first. As a result, when a user with change permissions\n # attempts a POST or DELETE, the response has status code 403, since\n # the user doesn't have permission to create or delete orders.\n self.assertIn(response.status_code, [405, 403])\n\n def test_login_required(self):\n \"\"\" The view should return HTTP 401 status if the user is not logged in. \"\"\"\n self.client.logout()\n self.assertEqual(401, self._put_to_view().status_code)\n\n def test_change_permissions_required(self):\n \"\"\"\n Verify that staff users with permission to change Order objects are\n able to modify orders on behalf of other users.\n \"\"\"\n customer = self.create_user(username='customer')\n customer_order = create_order(site=self.site, user=customer)\n self.url = reverse('api:v2:order-fulfill', kwargs={'number': customer_order.number})\n\n self._assert_fulfillment_success()\n\n # If the requesting user does not have the correct permissions, the view should\n # return HTTP 403 status.\n self.user.user_permissions.remove(self.change_order_permission)\n self.assertEqual(403, self._put_to_view().status_code)\n\n def test_order_complete_state_disallowed(self):\n \"\"\" If the order is Complete, the view must return an HTTP 406. \"\"\"\n self.order.status = ORDER.COMPLETE\n self.order.save()\n self.assertEqual(406, self._put_to_view().status_code)\n\n @ddt.data(ORDER.OPEN, ORDER.FULFILLMENT_ERROR)\n def test_ideal_conditions(self, order_status):\n \"\"\"\n If the user is authenticated/authorized, and the order is in the Open or Fulfillment Error\n states, the view should attempt to fulfill the order. The view should return HTTP 200.\n \"\"\"\n self.order.status = order_status\n self.order.save()\n\n response = self._assert_fulfillment_success()\n\n # Reload the order from the DB and check its status\n self.order = Order.objects.get(number=self.order.number)\n self.assertEqual(str(self.order.number), response.data['number'])\n self.assertEqual(self.order.status, response.data['status'])\n\n def test_fulfillment_failed(self):\n \"\"\" If fulfillment fails, the view should return HTTP 500. 
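Here the shipping event handler is left unmocked, so the fulfillment attempt is expected to fail on the server side. 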
\"\"\"\n self.order.status = ORDER.FULFILLMENT_ERROR\n self.order.save()\n\n response = self._put_to_view()\n self.assertEqual(500, response.status_code)\n\n def test_email_opt_in_default(self):\n \"\"\"\n Verify that email_opt_in defaults to false if not given.\n \"\"\"\n with mock.patch.object(post_checkout, 'send', side_effect=post_checkout.send):\n self._assert_fulfillment_success()\n send_arguments = {\n 'sender': post_checkout,\n 'order': self.order,\n 'request': mock.ANY,\n 'email_opt_in': False,\n }\n post_checkout.send.assert_called_once_with(**send_arguments)\n\n @ddt.data(True, False)\n def test_email_opt_in(self, expected_opt_in):\n \"\"\"\n Verify that email_opt_in is set to the query param if given.\n \"\"\"\n # Add email_opt_in to url\n self.url += '?email_opt_in={expected_opt_in}'.format(expected_opt_in=expected_opt_in)\n with mock.patch.object(post_checkout, 'send', side_effect=post_checkout.send):\n self._assert_fulfillment_success()\n send_arguments = {\n 'sender': post_checkout,\n 'order': self.order,\n 'request': mock.ANY,\n 'email_opt_in': expected_opt_in,\n }\n post_checkout.send.assert_called_once_with(**send_arguments)\n\n\nclass OrderDetailViewTests(OrderDetailViewTestMixin, TestCase):\n @property\n def url(self):\n return reverse('api:v2:order-detail', kwargs={'number': self.order.number})\n\n\n@ddt.ddt\nclass ManualCourseEnrollmentOrderViewSetTests(TestCase, DiscoveryMockMixin):\n \"\"\"\n Test the `ManualCourseEnrollmentOrderViewSet` functionality.\n \"\"\"\n def setUp(self):\n super(ManualCourseEnrollmentOrderViewSetTests, self).setUp()\n self.url = reverse('api:v2:manual-course-enrollment-order-list')\n self.user = self.create_user(is_staff=True)\n self.client.login(username=self.user.username, password=self.password)\n self.course = CourseFactory(id='course-v1:MAX+CX+Course', partner=self.partner)\n self.course_uuid = '620a5ce5-6ff4-4b2b-bea1-a273c6920ae5'\n self.course_price = 50\n self.course.create_or_update_seat(\n certificate_type='verified',\n id_verification_required=True,\n price=self.course_price\n )\n self.course.create_or_update_seat(\n certificate_type='audit',\n id_verification_required=False,\n price=0\n )\n self.course_entitlement = create_or_update_course_entitlement(\n 'verified', 100, self.partner, self.course_uuid, 'Course Entitlement'\n )\n self.mock_access_token_response()\n self.mock_course_run_detail_endpoint(\n self.course,\n discovery_api_url=self.site.siteconfiguration.discovery_api_url,\n course_run_info={\n 'course_uuid': self.course_uuid\n }\n )\n responses.start()\n\n def tearDown(self):\n super().tearDown()\n responses.stop()\n responses.reset()\n\n def build_jwt_header(self, user):\n \"\"\"\n Return header for the JWT auth.\n \"\"\"\n return {'HTTP_AUTHORIZATION': self.generate_jwt_token_header(user)}\n\n def post_order(self, data, user):\n \"\"\"\n Make HTTP POST request and return the JSON response.\n \"\"\"\n data = json.dumps(data)\n headers = self.build_jwt_header(user)\n response = self.client.post(self.url, data, content_type='application/json', **headers)\n return response.status_code, response.json()\n\n def test_auth(self):\n \"\"\"\n Test that endpoint only works with the staff user\n \"\"\"\n post_data = self.generate_post_data(1)\n # Test unauthenticated access\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Test non-staff user\n non_staff_user = self.create_user(is_staff=False)\n status_code, __ = self.post_order(post_data, non_staff_user)\n 
self.assertEqual(status_code, status.HTTP_403_FORBIDDEN)\n\n # Test staff user\n status_code, __ = self.post_order(post_data, self.user)\n self.assertEqual(status_code, status.HTTP_200_OK)\n\n def test_bad_request(self):\n \"\"\"\n Test that HTTP 400 is return if `enrollments` key isn't in request\n \"\"\"\n response_status, response_data = self.post_order({}, self.user)\n\n self.assertEqual(response_status, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response_data, {\n \"status\": \"failure\",\n \"detail\": \"Invalid data. No `enrollments` field.\"\n })\n\n def test_missing_enrollment_data(self):\n \"\"\"\"\n Test that orders are marked as failures if expected data is not present in enrollment.\n \"\"\"\n\n # Single enrollment with no enrollment details\n post_data = {\"enrollments\": [{}]}\n _, response_data = self.post_order(post_data, self.user)\n\n error_detail = \"Missing required enrollment data: 'lms_user_id', 'username', 'email', 'course_run_key', 'mode'\"\n self.assertEqual(response_data, {\n \"orders\": [{\n \"status\": \"failure\",\n \"detail\": error_detail,\n \"new_order_created\": None\n }]\n })\n\n @ddt.unpack\n @ddt.data(\n (0.0, True),\n (50.0, True),\n (100.0, True),\n (-1.0, False),\n (100.001, False),\n (50, False),\n )\n def test_create_manual_order_with_discount_percentage(self, discount_percentage, is_valid):\n \"\"\"\"\n Test that orders with valid and invalid discount percentages.\n \"\"\"\n\n post_data = self.generate_post_data(1, discount_percentage=discount_percentage)\n _, response_data = self.post_order(post_data, self.user)\n if is_valid:\n self.assertEqual(len(response_data.get(\"orders\")), 1)\n self.assertEqual(response_data.get('orders')[0]['status'], \"success\")\n else:\n self.assertEqual(response_data.get('orders')[0]['status'], \"failure\")\n self.assertEqual(\n response_data.get('orders')[0]['detail'],\n \"Discount percentage should be a float from 0 to 100.\"\n )\n\n @ddt.unpack\n @ddt.data(\n (\"verified\", True),\n (\"honor\", False),\n (\"audit\", False),\n )\n def test_create_manual_order_with_mode(self, course_mode, is_paid):\n \"\"\"\"\n Test that orders with valid and invalid course modes.\n \"\"\"\n post_data = self.generate_post_data(1, mode=course_mode)\n _, response_data = self.post_order(post_data, self.user)\n if is_paid:\n self.assertEqual(len(response_data.get(\"orders\")), 1)\n self.assertEqual(response_data.get('orders')[0]['status'], \"success\")\n else:\n self.assertEqual(response_data.get('orders')[0]['status'], \"failure\")\n self.assertEqual(\n response_data.get('orders')[0]['detail'],\n \"Course mode should be paid\"\n )\n\n def test_create_manual_order(self):\n \"\"\"\"\n Test that manual enrollment order can be created with expected data.\n \"\"\"\n post_data = {\n \"enrollments\": [\n {\n \"lms_user_id\": 11,\n \"username\": \"ma\",\n \"email\": \"ma@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"discount_percentage\": 50.0,\n \"sales_force_id\": \"dummy-sales_force_id\",\n },\n {\n \"lms_user_id\": 12,\n \"username\": \"ma2\",\n \"email\": \"ma2@example.com\",\n \"discount_percentage\": 0.0,\n \"sales_force_id\": \"\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n {\n \"lms_user_id\": 13,\n \"username\": \"ma3\",\n \"email\": \"ma3@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n 
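# ma3 combines a full discount with a null sales_force_id\n                    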
\"discount_percentage\": 100.0,\n \"sales_force_id\": None,\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n {\n \"lms_user_id\": 14,\n \"username\": \"ma4\",\n \"email\": \"ma4@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"discount_percentage\": 100.0,\n \"enterprise_customer_name\": \"another-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae2\",\n },\n # to test if enterprise_customer_name updated for existing condition\n {\n \"lms_user_id\": 15,\n \"username\": \"ma5\",\n \"email\": \"ma5@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"discount_percentage\": 100.0,\n \"enterprise_customer_name\": \"another-enterprise-customer_with_new_name\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae2\",\n },\n # If discount percentage is not set then effective_contract_discount_percentage should be NULL.\n {\n \"lms_user_id\": 16,\n \"username\": \"ma6\",\n \"email\": \"ma6@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"another-enterprise-customer_with_new_name\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae2\",\n }\n ]\n }\n\n response_status, response_data = self.post_order(post_data, self.user)\n\n expected_enrollments = post_data[\"enrollments\"]\n # updating customer name to latest one\n expected_enrollments[3]['enterprise_customer_name'] = \"another-enterprise-customer_with_new_name\"\n\n self.assertEqual(response_status, status.HTTP_200_OK)\n\n orders = response_data.get(\"orders\")\n self.assertEqual(len(orders), len(expected_enrollments))\n for response_order, expected_enrollment in zip(orders, expected_enrollments):\n user = User.objects.get(\n username=expected_enrollment['username'],\n email=expected_enrollment['email'],\n lms_user_id=expected_enrollment['lms_user_id']\n )\n\n # get created order\n order = Order.objects.get(number=response_order['detail'])\n\n # verify basket owner is correct\n basket = Basket.objects.get(id=order.basket_id)\n\n self.assertEqual(basket.owner, user)\n\n # verify order is created with expected data\n self.assertEqual(order.status, ORDER.COMPLETE)\n self.assertEqual(order.total_incl_tax, 0)\n self.assertEqual(order.lines.count(), 1)\n line = order.lines.first()\n\n # verify line has the correct 'effective_contract_discount_percentage' and\n # line_effective_contract_discounted_price values\n discount_percentage = expected_enrollment.get('discount_percentage')\n sales_force_id = expected_enrollment.get('sales_force_id')\n if discount_percentage is None:\n self.assertEqual(line.effective_contract_discount_percentage, None)\n self.assertEqual(line.effective_contract_discounted_price, None)\n else:\n line_effective_discount_percentage = Decimal('0.01') * Decimal(discount_percentage)\n line_effective_contract_discounted_price = line.unit_price_excl_tax \\\n * (Decimal('1.00000') - line_effective_discount_percentage).quantize(Decimal('.00001'))\n self.assertEqual(line.effective_contract_discount_percentage, line_effective_discount_percentage)\n self.assertEqual(line.effective_contract_discounted_price, line_effective_contract_discounted_price)\n\n self.assertEqual(line.status, LINE.COMPLETE)\n self.assertEqual(line.line_price_before_discounts_incl_tax, self.course_price)\n product = Product.objects.get(id=line.product.id)\n 
self.assertEqual(product.course_id, self.course.id)\n\n # verify condition\n offer = order.discounts.first().offer\n condition = offer.condition\n if sales_force_id:\n self.assertEqual(offer.sales_force_id, sales_force_id)\n self.assertEqual(condition.enterprise_customer_name, expected_enrollment.get('enterprise_customer_name'))\n self.assertEqual(\n str(condition.enterprise_customer_uuid),\n str(expected_enrollment.get('enterprise_customer_uuid'))\n )\n\n def test_create_manual_order_with_date_placed(self):\n \"\"\"\"\n Test that manual enrollment order for old enrollment can be created correctly.\n \"\"\"\n price_1 = 100\n price_2 = 200\n final_price = 300\n stock_record = self.course.seat_products.filter(\n attributes__name='certificate_type'\n ).exclude(\n attribute_values__value_text='audit'\n ).first().stockrecords.first()\n\n time_at_initial_price = datetime.now(pytz.utc).isoformat()\n\n stock_record.price_excl_tax = price_1\n stock_record.save()\n stock_record.price_excl_tax = price_2\n stock_record.save()\n\n time_at_price_2 = datetime.now(pytz.utc).isoformat()\n\n stock_record.price_excl_tax = final_price\n stock_record.save()\n\n time_at_final_price = datetime.now(pytz.utc).isoformat()\n\n self.assertEqual(stock_record.history.count(), 4)\n\n post_data = {\n \"enrollments\": [\n {\n \"lms_user_id\": 11,\n \"username\": \"ma1\",\n \"email\": \"ma`@example.com\",\n \"date_placed\": time_at_initial_price,\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n {\n \"lms_user_id\": 12,\n \"username\": \"ma2\",\n \"email\": \"ma2@example.com\",\n \"date_placed\": time_at_price_2,\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n {\n \"lms_user_id\": 13,\n \"username\": \"ma3\",\n \"email\": \"ma3@example.com\",\n \"date_placed\": time_at_final_price,\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n ]\n }\n\n response_status, response_data = self.post_order(post_data, self.user)\n expected_enrollments = post_data[\"enrollments\"]\n self.assertEqual(response_status, status.HTTP_200_OK)\n orders = response_data.get(\"orders\")\n self.assertEqual(len(orders), len(expected_enrollments))\n\n for response_order, expected_enrollment in zip(orders, expected_enrollments):\n # get created order\n order = Order.objects.get(number=response_order['detail'])\n expected_date_placed = expected_enrollment['date_placed']\n self.assertEqual(order.date_placed.isoformat(), expected_date_placed)\n self.assertEqual(order.lines.count(), 1)\n line = order.lines.first()\n\n if expected_date_placed == time_at_initial_price:\n expected_course_price = self.course_price\n elif expected_date_placed == time_at_price_2:\n expected_course_price = price_2\n elif expected_date_placed == time_at_final_price:\n expected_course_price = final_price\n else:\n expected_course_price = \"Invalid Price\"\n self.assertEqual(line.line_price_before_discounts_incl_tax, expected_course_price)\n self.assertEqual(line.line_price_before_discounts_excl_tax, expected_course_price)\n self.assertEqual(line.line_price_incl_tax, 0)\n self.assertEqual(line.line_price_excl_tax, 0)\n\n def 
test_create_manual_order_with_existing_entitlement(self):\n \"\"\"\"\n Test when user had already purchased the course entitlement.\n \"\"\"\n # purchasing self.course's course_entitlement for self.user\n basket = factories.BasketFactory(owner=self.user, site=self.site)\n basket.add_product(self.course_entitlement, 1)\n order = create_order(basket=basket, user=self.user)\n order.lines.update(status=LINE.COMPLETE)\n\n course_without_discovery_data = CourseFactory(id='course-v1:Demo+Demox+Course', partner=self.partner)\n\n pre_request_order_count = Order.objects.count()\n\n post_data = {\n \"enrollments\": [\n # test when user have existing course entitlement purchased.\n {\n \"lms_user_id\": self.user.lms_user_id,\n \"username\": self.user.username,\n \"email\": self.user.email,\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n # test when user have NOT purchased course entitlement.\n {\n \"lms_user_id\": 12,\n \"username\": \"ma2\",\n \"email\": \"ma2@example.com\",\n \"course_run_key\": self.course.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n },\n # test if there is not any record against a course in the discovery.\n {\n \"lms_user_id\": 13,\n \"username\": \"ma3\",\n \"email\": \"ma3@example.com\",\n \"course_run_key\": course_without_discovery_data.id,\n \"mode\": \"verified\",\n \"enterprise_customer_name\": \"an-enterprise-customer\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n }\n ]\n }\n\n response_status, response_data = self.post_order(post_data, self.user)\n expected_enrollments = post_data[\"enrollments\"]\n self.assertEqual(response_status, status.HTTP_200_OK)\n self.assertEqual(pre_request_order_count + 1, Order.objects.count())\n orders = response_data.get(\"orders\")\n self.assertEqual(len(orders), len(expected_enrollments))\n\n self.assertEqual(orders[0]['status'], 'success')\n self.assertEqual(orders[0]['lms_user_id'], self.user.lms_user_id)\n self.assertEqual(orders[0]['new_order_created'], False)\n\n self.assertEqual(orders[1]['status'], 'success')\n self.assertEqual(orders[1]['lms_user_id'], 12)\n self.assertEqual(orders[1]['new_order_created'], True)\n\n self.assertEqual(orders[2]['status'], 'failure')\n self.assertEqual(orders[2]['detail'], 'Failed to create free order')\n\n def test_create_manual_order_with_incorrect_course(self):\n \"\"\"\"\n Test that manual enrollment order endpoint returns expected error response if course is incorrect.\n \"\"\"\n post_data = self.generate_post_data(1)\n post_data[\"enrollments\"][0][\"course_run_key\"] = \"course-v1:MAX+ABC+Course\"\n\n _, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_data[\"orders\"][0][\"detail\"], \"Course not found\")\n\n def test_create_manual_order_idempotence(self):\n \"\"\"\"\n Test that manual enrollment order endpoint does not create multiple orders if called multiple\n times with same data.\n \"\"\"\n post_data = self.generate_post_data(1)\n response_status, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_status, status.HTTP_200_OK)\n existing_order_number = response_data[\"orders\"][0][\"detail\"]\n\n response_status, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_status, status.HTTP_200_OK)\n 
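# the repeated POST must return the existing order number instead of creating a new order\n        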
self.assertEqual(response_data[\"orders\"][0][\"detail\"], existing_order_number)\n\n def test_bulk_all_correct(self):\n \"\"\"\n Test that endpoint correctly handles correct bulk enrollments\n \"\"\"\n post_data = self.generate_post_data(3)\n response_status, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_status, status.HTTP_200_OK)\n for index, enrollment in enumerate(post_data[\"enrollments\"]):\n order_number = response_data[\"orders\"][index][\"detail\"]\n self.assertEqual(\n dict(enrollment, status=\"success\", detail=order_number, new_order_created=True),\n response_data[\"orders\"][index]\n )\n\n def test_bulk_all_failure(self):\n \"\"\"\n Test that endpoint correctly handles invalid bulk enrollments\n \"\"\"\n post_data = self.generate_post_data(3)\n # Replace course run key of all enrollments with invalid course\n post_data[\"enrollments\"] = [\n dict(enrollment, course_run_key=\"course-v1:MAX+ABC+Course\")\n for enrollment in post_data[\"enrollments\"]\n ]\n response_status, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_status, status.HTTP_200_OK)\n for index, enrollment in enumerate(post_data[\"enrollments\"]):\n self.assertEqual(\n dict(enrollment, status=\"failure\", detail=\"Course not found\", new_order_created=None),\n response_data[\"orders\"][index]\n )\n\n def test_bulk_mixed_success(self):\n \"\"\"\n Test that endpoint correctly handles a mix of correct and invalid bulk enrollments\n \"\"\"\n post_data = self.generate_post_data(3)\n # Replace course run key for first enrollment only\n post_data[\"enrollments\"][0][\"course_run_key\"] = \"course-v1:MAX+ABC+Course\"\n response_status, response_data = self.post_order(post_data, self.user)\n self.assertEqual(response_status, status.HTTP_200_OK)\n for index, enrollment in enumerate(post_data[\"enrollments\"]):\n if index == 0:\n # Order should fail because missing enrollment\n self.assertEqual(\n dict(enrollment, status=\"failure\", detail=\"Course not found\", new_order_created=None),\n response_data[\"orders\"][index]\n )\n else:\n # Order should succeed\n order_number = response_data[\"orders\"][index][\"detail\"]\n self.assertEqual(\n dict(enrollment, status=\"success\", detail=order_number, new_order_created=True),\n response_data[\"orders\"][index]\n )\n\n @mock.patch(\n 'ecommerce.extensions.api.v2.views.orders.EdxOrderPlacementMixin.place_free_order',\n new_callable=mock.PropertyMock,\n side_effect=BasketNotFreeError\n )\n def test_create_manual_order_exception(self, __):\n \"\"\"\"\n Test that manual enrollment order endpoint returns expected error if an error occurred in\n `place_free_order`.\n \"\"\"\n post_data = self.generate_post_data(1)\n _, response_data = self.post_order(post_data, self.user)\n order = response_data[\"orders\"][0]\n self.assertEqual(order[\"status\"], \"failure\")\n self.assertEqual(order[\"detail\"], \"Failed to create free order\")\n\n def generate_post_data(self, enrollment_count, discount_percentage=0.0, mode=\"verified\"):\n return {\n \"enrollments\": [\n {\n \"lms_user_id\": 10 + count,\n \"username\": \"ma{}\".format(count),\n \"email\": \"ma{}@example.com\".format(count),\n \"course_run_key\": self.course.id,\n \"mode\": mode,\n \"discount_percentage\": discount_percentage,\n \"enterprise_customer_name\": \"customer_name\",\n \"enterprise_customer_uuid\": \"394a5ce5-6ff4-4b2b-bea1-a273c6920ae1\",\n }\n for count in range(enrollment_count)\n ]\n 
}\n","repo_name":"openedx/ecommerce","sub_path":"ecommerce/extensions/api/v2/tests/views/test_orders.py","file_name":"test_orders.py","file_ext":"py","file_size_in_byte":46198,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"47"} +{"seq_id":"15616298443","text":"def multiplica(*args):\n total = 1\n for numero in args:\n total *= numero \n return total\n\nmultiplicacao = multiplica(10,2,3,4,5)\nprint(multiplicacao)\n\ndef par_impar(numero):\n multiplo_de_dois = numero % 2 == 0\n\n if multiplo_de_dois:\n return f'{numero} é par'\n return f'{numero} é impar'\n\noutro_par_impar = par_impar\ndois_e_par = outro_par_impar(2)\nprint(dois_e_par)\nprint(par_impar(3))\n\n","repo_name":"LucasVoyager/Python","sub_path":"Review intermediario/review4.py","file_name":"review4.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23182851484","text":"__author__ = \"CryDeTaan\"\n\nimport time\nimport logging\nfrom collections import Counter\nfrom re import search as re_search\n\nimport config\nfrom core.utils import coinmarketcap_api\nfrom core.utils import telegram_api\nfrom core.utils import prepare_string_response_for_coin\n\n\nlogger = logging.getLogger(__name__)\n\n# Some variables from and not from the config file.\ncoin_list = config.telegram_bot['coin_list']\n\nban_dict = {}\nrecent_sender_dict = {}\nlast_message_id = None\n\n\ndef listener():\n \"\"\"\n The listener listens to any messages to the bot, privately or in a group.\n These messages are then handled by the different handlers.\n Its acts as the gateway between the APIs and the utilities.\n :return:\n \"\"\"\n\n '''\n Need to keep track of the message id, which is used as an offset in the telegram API, so that the messages \n before this offset are basically marked as read and will no longer be available to the bot.\n We will update this ID during the while loop.\n '''\n global last_message_id\n\n '''\n Using the API get the unread messages, and if there are any new messages send them to the handler where most of the\n processing will happen. This function really just acts as a gateway.\n '''\n messages = telegram_api.get_messages(last_message_id)\n\n '''\n Only process the response if the correct data was received. 
If not, log that the result attribute is missing.\n    '''\n    logger.debug(f'Checking Telegram API response.')\n    if 'result' in messages and len(messages['result']) > 0:\n        last_message_id = get_last_message_id(messages) + 1\n        handle_messages(messages)\n    else:\n        logger.debug(f'Message result attribute missing.')\n\n\ndef updater():\n    \"\"\"\n    Get the price of the coins set in the config file from the coinmarketcap API and send a Telegram message on an hourly basis.\n\n    :return: None\n    \"\"\"\n\n    for coin in coin_list:\n        coin = coinmarketcap_api.get_coin_data(coin)\n\n        # Check if the coin variable contains any of the expected data.\n        if 'name' in coin[0]:\n            logger.debug(f'Setting up response for sending message to Telegram API.')\n            coin_string = str(prepare_string_response_for_coin.Coin(coin))\n            telegram_api.send_message(text=coin_string)\n            continue\n\n\ndef get_last_message_id(messages):\n\n    \"\"\"\n    Get the last message ID that will be used as an offset for the telegram API, the offset is used to basically mark\n    messages as read, and all previous messages will be forgotten.\n\n    :param messages: a list of messages received by the bot.\n\n    :return: An integer for the last 'update_id' parameter from the given messages\n    \"\"\"\n\n    messages_ids = []\n\n    for message in messages[\"result\"]:\n        messages_ids.append(int(message[\"update_id\"]))\n\n    return max(messages_ids)\n\n\ndef update_ban_list(messages):\n    \"\"\"\n\n    Check if the sender has sent two commands in the last 5 seconds or multiple messages in a single API getUpdates\n    call, if so ban for 5 min and notify the user they have been banned for 5 min.\n    The function updates this dictionary outside of its scope as we only care about the dictionary from another\n    function. This might not be the best way of doing it, but ¯\\\_(ツ)_/¯\n\n    :param messages: messages['date'] and message['from']['first_name']\n\n    :return: None\n    \"\"\"\n\n    # This dictionary will be used to send a message to all the users that have just been banned.\n    banned_users = []\n\n    '''\n    Use Counter from the collections module to count the occurrences of the messages sent by a user. Save this to a\n    dictionary that will be iterated over to determine if any user has sent multiple messages in a single getUpdates\n    API call to the telegram API.\n    '''\n    try:\n        messages_count = dict(Counter([item['message']['from']['first_name'] for item in messages['result']]))\n    except KeyError:\n        logger.debug(f'Oh shit, lets hope this does not happen often. No banning can occur this time around.')\n        return\n    # TODO: There has to be a better way, this is a shitty hack.\n\n    '''\n    Calculate if the sender should be banned. If the same sender's count is more than one in a single get_updates, we can\n    already assume that more than 1 message has been sent in 5 seconds and the sender will therefore be banned for 5 minutes.\n    '''\n    for from_name, message_count in messages_count.items():\n        if message_count > 2:\n            ban_dict[from_name] = time.time() + 300\n            banned_users.append(from_name)\n            logger.debug(f'{from_name} has been banned for 5 min')\n\n    '''\n    Calculate if the sender should be banned. If the same sender sends more than two messages in 5 seconds,\n    they will be banned for 5 minutes.\n    \n    If the user is not banned, either by the previous step or this step, we still want to note the time so that we\n    can calculate if we've seen a message from the same user in the last 5 seconds,\n    which is what happens in the else section.\n    '''\n    sender_time = {message['message']['from']['first_name']: message['message']['date'] for message in\n                   messages['result']}\n    for from_name in sender_time:\n        if from_name in recent_sender_dict:\n            if (recent_sender_dict[from_name] + 5) > sender_time[from_name]:\n                ban_dict[from_name] = time.time() + 300\n                banned_users.append(from_name)\n                logger.debug(f'{from_name} has been banned for 5 min')\n        else:\n            recent_sender_dict[from_name] = time.time()\n            logger.debug(f'{from_name} has been added to the recent_sender_dict')\n\n    # If the ban list is empty, there is no reason to run this part of the code, so we first check if we need to.\n    if len(banned_users) > 0:\n        text = \"NO! Don't do that \" + \\\n               ', '.join(str(banned_user) for banned_user in banned_users) + \\\n               \", I am ignoring you for 5 min! \"\n        telegram_api.send_message(text)\n\n    # TODO: Clear the ban_dict = {} after a certain period.\n\n\ndef handle_messages(messages):\n    \"\"\"\n    A few checks need to happen on all incoming messages.\n    1. Set some variables.\n    2. Check if the text in the message might be a command which starts with a '/'.\n    3. Check if the sender has been banned because of sending too many consecutive messages or in quick succession.\n    4. If the message starts with / and matches one of the commands, get an update for the coin and send it to the telegram chat.\n    :param messages: All received messages since the last getUpdates()\n    :return:\n    \"\"\"\n\n    # Some variables to use during this function call.\n    update_ban_list(messages)\n    coins_to_update = []\n\n    # For each message we need to perform a few operations.\n    logger.debug(f'Handling messages.')\n    for message in messages[\"result\"]:\n\n        try:\n            # Set variables that will be used for each message.\n            first_name = message['message']['from']['first_name']\n            message_time = message['message']['date']\n            message_text = message['message']['text']\n\n        except KeyError:\n            logger.debug(f'Not a message that needs to be handled.')\n            continue\n\n        # Check if the sender is banned; if the sender is in the ban list move to the next message.\n        if first_name in ban_dict and message_time < ban_dict[first_name]:\n            logger.debug(f'A banned sender tried to send a message.')\n            continue\n\n        '''\n        This try/except will check if a received message is considered to be a command because the message starts\n        with a /, and it will also save the command as a variable called coin.\n        If the message does not start with / it will not be a command and the loop can continue to the next message.\n        '''\n        try:\n            search_coin = re_search(\"(?<=^/)[a-z]+\", message_text)\n            coin = search_coin.group()\n            logger.debug(f'The message received may be a command as it starts with /.')\n        except AttributeError:\n            logger.debug(f'The message received is not a command, no / found.')\n            continue\n\n        '''\n        Check if the /command matches any of the following:\n        1. bitcoincash, the reason for this check is that the command option from the telegram @botfather does not\n        allow \"-\" in the name, and breaks, so we need to specifically check for this and then add the correct coin\n        name as required by the Coin Market Cap API, which is bitcoin-cash\n        2. all, if the command matches all, we will loop over all the coins in the coin_list and append them to a new\n        list which we will use at the end to collect all the coins' data from the Coin Market Cap API\n        3. Lastly, if the command matches any single coin in the coin_list, that coin will be added to the\n        coins_to_update variable.\n        '''\n        logger.debug(f'Checking if the coin command variable matches any of the expected coins.')\n        if 'coin' in locals():\n            if coin == 'bitcoincash':\n                coins_to_update.append('bitcoin-cash')\n\n            elif coin == 'all':\n                for coin in coin_list:\n                    coins_to_update.append(coin)\n\n            elif coin in coin_list:\n                coins_to_update.append(coin)\n\n    '''\n    Now that we have a list of all the coins from the latest messages we can loop over them to collect the coin data\n    from the Coin Market Cap API and send it to the Telegram API to deliver to the chat_id.\n    We also check if the response from the Coin Market Cap API returned any results.\n    '''\n    for coin_to_update in coins_to_update:\n        coin = coinmarketcap_api.get_coin_data(coin_to_update)\n\n        # Check if the coin variable contains any of the expected data.\n        if 'name' in coin[0]:\n            logger.debug(f'Setting up response for sending message to Telegram API.')\n            coin_string = str(prepare_string_response_for_coin.Coin(coin))\n            telegram_api.send_message(text=coin_string)\n            continue\n\n        logger.debug(f'No response received from the Coin Market Cap API.')\n\n\n","repo_name":"CryDeTaan/cryptoPriceUpdate2.1","sub_path":"core/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73882867663","text":"import math\nfrom codar.cheetah import Campaign\nfrom codar.cheetah import parameters as p\nfrom codar.savanna.machines import SummitNode\nfrom codar.cheetah.parameters import SymLink\nimport node_layouts as node_layouts\nimport sweeps as sweeps\nimport copy\n\n\nclass LAMMPS(Campaign):\n    name = \"lammps\"\n\n    codes = [ (\"simulation\", dict(exe=\"lmp\", adios_xml_file='adios2_config.xml')),\n              (\"rdf_calc\", dict(exe=\"lmp\", adios_xml_file='adios2_config.xml')), \n              (\"sim_inline_rdf_calc\", dict(exe=\"lmp\", adios_xml_file='adios2_config.xml')),]\n\n    supported_machines = ['local', 'titan', 'theta', 'summit']\n    kill_on_partial_failure = True\n    run_dir_setup_script = None\n    run_post_process_script = None\n    umask = '027'\n    scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'default'},\n                         'summit': {'project':'csc303'}}\n    app_config_scripts = {'local': 'setup.sh', 'theta': 'env_setup.sh', 'summit':'env-setup.sh'}\n\n    sim_nodes = [1, 2]\n    analysis_nodes = [1]\n    sweeps = []\n    sg_count = 1\n    for s in sim_nodes:\n        for a in analysis_nodes:\n            if a > s:\n                continue\n            sw_inline = sweeps.inline_analysis(42*(s+a))\n            sw_posthoc = sweeps.posthoc_analysis(42*s, 42*a)\n            sw_insitu_sst_sep_nodes = sweeps.insitu_analysis(42*s, 42*a, node_layouts.separate_nodes(), 'SST')\n            sw_insitu_sst_shared_nodes_21to21 = sweeps.insitu_analysis(21*(s+a), 21*(s+a), node_layouts.share_nodes_21to21(), 'SST')\n            sw_insitu_sst_shared_nodes_28to14 = sweeps.insitu_analysis(28*(s+a), 14*(s+a), node_layouts.share_nodes_28to14(), 'SST')\n            sw_insitu_sst_shared_nodes_35to7 = sweeps.insitu_analysis(35*(s+a), 7*(s+a), node_layouts.share_nodes_35to7(), 'SST')\n            sw_insitu_sst_shared_nodes_40to2 = sweeps.insitu_analysis(40*(s+a), 2*(s+a), node_layouts.share_nodes_40to2(), 'SST')\n            sw_insitu_bp4_sep_nodes = sweeps.insitu_analysis(42*s, 42*a, 
node_layouts.separate_nodes(), 'BP4')\n sw_insitu_bp4_shared_nodes_21to21 = sweeps.insitu_analysis(21*(s+a), 21*(s+a), node_layouts.share_nodes_21to21(), 'BP4')\n sw_insitu_bp4_shared_nodes_28to14 = sweeps.insitu_analysis(28*(s+a), 14*(s+a), node_layouts.share_nodes_28to14(), 'BP4')\n sw_insitu_bp4_shared_nodes_35to7 = sweeps.insitu_analysis(35*(s+a), 7*(s+a), node_layouts.share_nodes_35to7(), 'BP4')\n sw_insitu_bp4_shared_nodes_40to2 = sweeps.insitu_analysis(40*(s+a), 2*(s+a), node_layouts.share_nodes_40to2(), 'BP4')\n \n\n # Create a SweepGroup and add the above Sweeps. Set batch job properties such as the no. of nodes, \n sweepGroup = p.SweepGroup (\"sg-\"+str(sg_count),\n walltime=1200,\n per_run_timeout=200,\n parameter_groups=[sw_inline, sw_posthoc, sw_insitu_sst_sep_nodes, sw_insitu_sst_shared_nodes_21to21, sw_insitu_sst_shared_nodes_28to14, sw_insitu_sst_shared_nodes_35to7, sw_insitu_sst_shared_nodes_40to2, sw_insitu_bp4_sep_nodes, sw_insitu_bp4_shared_nodes_21to21, sw_insitu_bp4_shared_nodes_28to14, sw_insitu_bp4_shared_nodes_35to7, sw_insitu_bp4_shared_nodes_40to2],\n launch_mode='default',\n #nodes=16,\n component_inputs = {'simulation': ['in.lj.nordf'], 'rdf_calc': ['in.lj.rdf.rerun'], 'sim_inline_rdf_calc': ['in.lj.rdf.nodump']},\n run_repetitions=2, )\n \n sg_count += 1\n # Activate the SweepGroup\n sweeps.append(sweepGroup)\n\n","repo_name":"ornladios/ADIOS2-Testing","sub_path":"performance/lammps-rdf/lammps-campaign-spec.py","file_name":"lammps-campaign-spec.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"5027194202","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch import optim\nfrom torch.utils.data import DataLoader\n\nneg_prob = np.load(\"neg_prob.npy\")\ndataloader = DataLoader(neg_prob, batch_size=64, shuffle=True)\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n self.fc1 = nn.Linear(5533, 1024)\n self.fc21 = nn.Linear(1024, 32)\n self.fc22 = nn.Linear(1024, 32)\n self.fc3 = nn.Linear(32, 1024)\n self.fc4 = nn.Linear(1024, 5533)\n\n def encode(self, x):\n h1 = F.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n if torch.cuda.is_available():\n eps = torch.cuda.FloatTensor(std.size()).normal_()\n else:\n eps = torch.FloatTensor(std.size()).normal_()\n eps = Variable(eps)\n return eps.mul(std).add_(mu)\n\n def decode(self, z):\n h3 = F.relu(self.fc3(z))\n return F.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparametrize(mu, logvar)\n return self.decode(z), mu, logvar\n\nmodel = VAE()\nif torch.cuda.is_available():\n model.cuda()\n\nreconstruction_function = nn.MSELoss(size_average=False)\n\ndef loss_function(recon_x, x, mu, logvar):\n BCE = reconstruction_function(recon_x, x)\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n KLD = torch.sum(KLD_element).mul_(-0.5)\n\n return BCE + KLD\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\nfor epoch in range(200):\n model.train()\n train_loss = 0\n for batch_idx, data in enumerate(dataloader):\n data = Variable(data.float())\n if torch.cuda.is_available():\n data = data.cuda()\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += 
loss.data[0]\n optimizer.step()\n print('Train Epoch {}; Loss {:.6f}'.format(epoch, train_loss / len(dataloader.dataset)))\n\nmodel.eval()\na, b = model.encode(torch.from_numpy(neg_prob).float().cuda())\nencoded_neg_prob = np.hstack((a.cpu().detach().numpy(), b.cpu().detach().numpy()))\nprint(encoded_neg_prob.shape)\nnp.save('encoded_neg_prob.npy', encoded_neg_prob)\n","repo_name":"victai/SDML","sub_path":"HW2/Task2/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22052276912","text":"import os\n\nimport cv2\nimport numpy as np\n\nfrom utils import logger, config\nfrom utils.predictor import Predictor\nfrom utils.get_image_list import get_image_list\nfrom python.preprocess import create_operators\nfrom python.postprocess import build_postprocess\n\n\nclass ClsPredictor(Predictor):\n def __init__(self, config):\n super().__init__(config[\"Global\"])\n\n self.preprocess_ops = []\n self.postprocess = None\n if \"PreProcess\" in config:\n if \"transform_ops\" in config[\"PreProcess\"]:\n self.preprocess_ops = create_operators(config[\"PreProcess\"][\n \"transform_ops\"])\n if \"PostProcess\" in config:\n self.postprocess = build_postprocess(config[\"PostProcess\"])\n\n # for whole_chain project to test each repo of paddle\n self.benchmark = config[\"Global\"].get(\"benchmark\", False)\n if self.benchmark:\n import auto_logger\n import os\n pid = os.getpid()\n size = config[\"PreProcess\"][\"transform_ops\"][1][\"CropImage\"][\n \"size\"]\n if config[\"Global\"].get(\"use_int8\", False):\n precision = \"int8\"\n elif config[\"Global\"].get(\"use_fp16\", False):\n precision = \"fp16\"\n else:\n precision = \"fp32\"\n self.auto_logger = auto_logger.AutoLogger(\n model_name=config[\"Global\"].get(\"model_name\", \"cls\"),\n model_precision=precision,\n batch_size=config[\"Global\"].get(\"batch_size\", 1),\n data_shape=[3, size, size],\n save_path=config[\"Global\"].get(\"save_log_path\",\n \"./auto_log.log\"),\n inference_config=self.config,\n pids=pid,\n process_name=None,\n gpu_ids=None,\n time_keys=[\n 'preprocess_time', 'inference_time', 'postprocess_time'\n ],\n warmup=2)\n\n def predict(self, images):\n use_onnx = self.args.get(\"use_onnx\", False)\n if not use_onnx:\n input_names = self.predictor.get_input_names()\n input_tensor = self.predictor.get_input_handle(input_names[0])\n\n output_names = self.predictor.get_output_names()\n output_tensor = self.predictor.get_output_handle(output_names[0])\n else:\n input_names = self.predictor.get_inputs()[0].name\n output_names = self.predictor.get_outputs()[0].name\n\n if self.benchmark:\n self.auto_logger.times.start()\n if not isinstance(images, (list, )):\n images = [images]\n for idx in range(len(images)):\n for ops in self.preprocess_ops:\n images[idx] = ops(images[idx])\n image = np.array(images)\n if self.benchmark:\n self.auto_logger.times.stamp()\n\n if not use_onnx:\n input_tensor.copy_from_cpu(image)\n self.predictor.run()\n batch_output = output_tensor.copy_to_cpu()\n else:\n batch_output = self.predictor.run(\n output_names=[output_names],\n input_feed={input_names: image})[0]\n\n if self.benchmark:\n self.auto_logger.times.stamp()\n if self.postprocess is not None:\n batch_output = self.postprocess(batch_output)\n if self.benchmark:\n self.auto_logger.times.end(stamp=True)\n return batch_output\n\n\ndef main(config):\n cls_predictor = ClsPredictor(config)\n\n clas_ids_list = []\n 
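# accumulators for the predicted class ids and score strings of every processed image\n    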
scores_str_list = []\n\n    root_list = config[\"Global\"][\"infer_imgs\"]\n    img_list = []\n    i = 0\n    for root, dirs, files in os.walk(root_list, topdown=True, onerror=None, followlinks=False):\n        if i == 0:\n            label_list = dirs\n            img_list = files\n            i = 1\n        else:\n            img_list.append(files)\n\n    for i in range(24):\n        for j in range(len(img_list)):\n            infer_list = root_list + '/' + label_list[i] + '/' + img_list[i][j]\n            image_list = get_image_list(infer_list)  # cannot pass in a list here\n\n\n            batch_imgs = []\n            batch_names = []\n            cnt = 0\n            for idx, img_path in enumerate(image_list):\n                img = cv2.imread(img_path)\n                if img is None:\n                    logger.warning(\n                        \"Image file failed to read and has been skipped. The path: {}\".\n                        format(img_path))\n                else:\n                    img = img[:, :, ::-1]\n                    batch_imgs.append(img)\n                    img_name = os.path.basename(img_path)\n                    batch_names.append(img_name)\n                    cnt += 1\n\n                if cnt % config[\"Global\"][\"batch_size\"] == 0 or (idx + 1\n                                                                ) == len(image_list):\n                    if len(batch_imgs) == 0:\n                        continue\n                    batch_results = cls_predictor.predict(batch_imgs)\n                    for number, result_dict in enumerate(batch_results):\n                        if \"PersonAttribute\" in config[\n                                \"PostProcess\"] or \"VehicleAttribute\" in config[\n                                    \"PostProcess\"]:\n                            filename = batch_names[number]\n                            print(\"{}:\\t {}\".format(filename, result_dict))\n                        else:\n                            clas_ids = result_dict[\"class_ids\"]\n                            scores_str = \"[{}]\".format(\", \".join(\"{:.2f}\".format(\n                                r) for r in result_dict[\"scores\"]))\n                            clas_ids_list.append(clas_ids)\n                            scores_str_list.append(scores_str)\n                    batch_imgs = []\n                    batch_names = []\n    return clas_ids_list, scores_str_list\n\nif __name__ == \"__main__\":\n    yaml_dir = \"inference_configs/inference_cls.yaml\"\n    args = config.parse_args(yaml_dir)\n    config = config.get_config(args.config, overrides=args.override, show=True)\n    clas_ids_list, scores_str_list = main(config)\n    print(clas_ids_list[:][1])\n    print(scores_str_list[:][1])\n","repo_name":"fanzong996/PaddleDetection_voc_list_pre","sub_path":"infer_cmd.py","file_name":"infer_cmd.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24536943757","text":"from nltk.util import ngrams # function for making ngrams\nimport pandas as pd\nfrom collections import defaultdict\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\ndef extract_ngrams_from_line(string):\n    splits = string.split()\n    grams = ngrams(splits[1:], 1)\n    gram_counts = defaultdict(int)\n    for e in grams:\n        gram_counts[str(e[0])] += 1\n    gram_counts['classification'] = splits[0]\n\n    return gram_counts\n\n\ndef evaluate(model, model_name, val_X, val_y, labels):\n    y_predictions = model.predict(val_X)\n\n    cmtx = pd.DataFrame(\n        confusion_matrix(val_y, y_predictions, labels=labels),\n        index=['actual: ' + label for label in labels],\n        columns=['predicted: ' + label for label in labels]\n    )\n    precision, recall, fbeta, _ = precision_recall_fscore_support(\n        val_y, y_predictions, average='weighted')\n\n    print('\\n\\n----------', model_name, '----------\\n')\n    print('\\n', cmtx, '\\n\\n')\n    print('precision:', precision)\n    print('recall:', recall)\n    print('fbeta score:', fbeta)\n\n\ndef main():\n    with open(\"sms_data.txt\", \"r\", encoding='latin-1') as file:\n        text = file.read().split('\\n')\n    data = []\n    for line in text:\n        
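# each raw line holds a label followed by the message text; a blank line means end of file\n        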
if len(line) == 0: # eof\n break\n grams = extract_ngrams_from_line(line)\n data.append(grams)\n df = pd.DataFrame(data)\n\n models = [DecisionTreeClassifier(), RandomForestClassifier(), KNeighborsClassifier()]\n model_names = ['Decision tree', 'Random forest', 'K neighbors']\n\n df = df.fillna(0)\n text = None\n\n X = df.drop(columns=['classification'])\n y = df.classification\n\n train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)\n\n for model, name in zip(models, model_names):\n model.fit(train_X, train_y)\n labels = list(set(train_y))\n evaluate(model, name, val_X, val_y, labels)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DanielKrolopp/Conveyor","sub_path":"examples/sms_spam_dataset/sms_spam_detector_serial.py","file_name":"sms_spam_detector_serial.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"35926707657","text":"import logging\n\nimport numpy as np\nimport optuna\nimport torch\nimport wandb\n\nfrom copy import copy\n\nfrom .args import parse_tuning_args\nfrom .utils import get_group_name, get_wandb_mode, init_project_path\nfrom .run_model import run_daguerreo\n\nclass MultiObjectiveHPO():\n\n def __init__(self, args, project, group, wandb_mode):\n\n self.original_args = args\n self.project = project\n self.group = group\n self.wandb_mode = wandb_mode\n\n def _suggest_params(self, trial, args):\n\n if not args.joint:\n args.lr_theta = trial.suggest_loguniform(\"lr_theta\", 1e-4, 1e-1)\n\n if args.equations != \"lars\":\n args.lr = trial.suggest_loguniform(\"lr\", 1e-4, 1e-1)\n args.pruning_reg = trial.suggest_loguniform(\"pruning_reg\", 1e-6, 1e-1)\n \n if args.equations == \"nonlinear\":\n args.hidden = trial.suggest_categorical(\"hidden\", [10, 20, 50, 100])\n\n def __call__(self, trial):\n\n args = copy(self.original_args)\n self._suggest_params(trial, args)\n \n wandb_run = wandb.init(\n dir=args.results_path,\n entity=args.entity,\n project=self.project,\n name=f\"trial_{trial.number}\",\n group=self.group,\n config=vars(args),\n reinit=True,\n mode=self.wandb_mode,\n )\n \n log_dict = {}\n for noise in args.noise_models:\n\n print(f\"Running with noise model \\033[1m{noise}\\033[0m\")\n log_dict[noise] = {}\n args.sem_type = noise\n\n for graph in args.graph_types:\n \n logging.info(f\"graph type \\033[1m{graph}\\033[0m\")\n log_dict[noise][graph] = []\n args.graph_type = graph\n \n for edge_ratio in args.edge_ratios:\n\n args.s0 = int(edge_ratio * args.num_nodes)\n\n for seed in range(args.num_seeds):\n \n try:\n *_, seed_log_dict = run_daguerreo(args, seed)\n log_dict[noise][graph].append(seed_log_dict)\n \n except RuntimeError as e:\n logging.error(e)\n logging.info(\"Pruning current trial\")\n\n raise optuna.TrialPruned()\n \n noise_logs = [e for l in log_dict[noise].values() for e in l]\n log_dict[noise][\"avg_shdc\"] = np.mean([e[\"shdc\"] for e in noise_logs])\n log_dict[noise][\"avg_sid\"] = np.mean([e[\"sid\"] for e in noise_logs])\n\n log_dict[\"avg_shdc\"] = np.mean([log_dict[n][\"avg_shdc\"] for n in args.noise_models])\n log_dict[\"avg_sid\"] = np.mean([log_dict[n][\"avg_sid\"] for n in args.noise_models])\n\n wandb.log(log_dict)\n wandb_run.finish()\n\n return log_dict[\"avg_shdc\"], log_dict[\"avg_sid\"]\n\nif __name__ == \"__main__\":\n\n torch.set_default_dtype(torch.double)\n\n argparser = parse_tuning_args()\n args = argparser.parse_args()\n\n wandb_mode = get_wandb_mode(args)\n save_dir = 
init_project_path(args=args)\n\n group = get_group_name(args, log_graph_sem=False)\n\n objective = MultiObjectiveHPO(args, args.project, group, wandb_mode)\n \n study = optuna.create_study(\n study_name=\"hpo\",\n directions= [\"minimize\", \"minimize\"],\n # pruner=optuna.pruners.MedianPruner() # pruning not supported in MultiObjective\n )\n\n study.optimize(objective, n_trials=args.num_trials)\n\n df = study.trials_dataframe(attrs=(\"number\", \"value\", \"params\", \"state\"))\n\n best_ids = [t.number for t in study.best_trials]\n df_best = df.iloc[best_ids, :] \n\n df.to_csv(save_dir / f'{group}-trials.csv')\n df_best.to_csv(save_dir / f'{group}-best-trials.csv')\n","repo_name":"vzantedeschi/DAGuerreotype","sub_path":"daguerreo/hpo.py","file_name":"hpo.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"35856864502","text":"#!/usr/bin/env python3\n\nfrom .token import Token\nfrom .date_value import DateValue\nimport re\nimport math\n\n# This is a single value. Notably, it can have a Modifier\n# attached to it\nclass Value(Token):\n MIN_FLOAT_VALUE = 0.0000000001\n\n def __init__(self, value, modifier=None):\n # The constructor can take a modifier from elsewhere\n # or it can be attached later\n from .modifier import Modifier\n\n super().__init__(value)\n if modifier is not None:\n if isinstance(modifier, Value):\n modifier = modifier.modifier\n if modifier is not None:\n if not isinstance(modifier, Modifier):\n modifier = None\n self.modifier = modifier\n self.was_multiplier = False\n\n def get_desc(self):\n return \"val\"\n\n @staticmethod\n def as_date(value, is_time):\n from .modifier import Modifier\n value = DateValue(value, is_time)\n return Value(value, Modifier(Token.UNPRINTABLE + \"date\", \"date\"))\n\n @staticmethod\n def as_base(value, base):\n from .modifier import Modifier\n return Value(value, Modifier(Token.UNPRINTABLE + base, base))\n\n @staticmethod\n def as_value(value):\n if re.search(\"^[0-9,.]+$\", value):\n try:\n value = float(value.replace(\",\", \"\"))\n return Value(value)\n except:\n pass\n return None\n \n def to_string(self):\n if isinstance(self.value, str):\n return self.value\n\n # Turning this into a string is complicated due to the modifer\n from .modifier import Modifier\n\n # First off, turn the value itself into a string\n # We always add thousand seperator commas\n temp = None\n mod = self.modifier\n\n if temp is None and isinstance(mod, Modifier) and mod.get_type() == \"english\":\n # Handle english phrases\n from .word import Word\n return Word.to_english(self.value)\n\n # Try to handle the special edge case modifiers\n if temp is None and isinstance(mod, Modifier) and mod.value.startswith(Token.UNPRINTABLE):\n # The hex/oct/bin modifiers just dump out the number\n if self.modifier.value[1:] == \"hex\":\n return f\"0x{int(self.value):x}\"\n elif self.modifier.value[1:] == \"oct\":\n return f\"0o{int(self.value):o}\"\n elif self.modifier.value[1:] == \"bin\":\n return f\"0b{int(self.value):b}\"\n elif self.modifier.value[1:] == \"dec\":\n # Just ignore this modifier\n mod = None\n elif self.modifier.value[1:] == \"date\":\n # Turn the date into a date string\n if isinstance(self.value, DateValue):\n if self.value.is_time:\n if self.value.days() > 1:\n temp = f'{self.value.days()}d {self.value.value.strftime(\"%H:%M:%S\")}'\n else:\n temp = self.value.value.strftime(\"%H:%M:%S\")\n elif (self.value.value.hour + self.value.value.minute + 
self.value.value.second) != 0:\n temp = self.value.value.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n temp = self.value.value.strftime(\"%Y-%m-%d\")\n else:\n temp = str(self.value)\n\n if temp is None and isinstance(mod, Modifier) and mod.get_type() == \"currency\":\n # Currency is a big special, it has a fixed number of decimal places\n currency = mod.value.lower()\n if currency not in {\"bitcoin\", \"yen\", \"ethereum\"}:\n # Most currencies get two decimal places, always.\n temp = f\"{self.value:,.2f}\"\n elif currency in {\"yen\"}:\n # However, yen is treated as an integer\n temp = f\"{self.value:,.0f}\"\n elif currency in {\"bitcoin\"}:\n # BTC gets lots of precision\n temp = f\"{self.value:,.6f}\"\n elif currency in {\"ethereum\"}:\n # ETH gets four digits\n temp = f\"{self.value:,.4f}\"\n\n if temp is None and abs(self.value) > 0.1 and abs(self.value - int(self.value)) < Value.MIN_FLOAT_VALUE:\n # If it's really close to being an integer, just pretend it is one, but if it's really\n # near zero, go ahead and fall into the normal logic\n temp = f\"{self.value:,.0f}\"\n\n if temp is None:\n # Otherwise, just turn it into a string, giving about 8 significant digits\n temp = int(abs(self.value))\n if temp > 1:\n temp = max(2, min(7, 7 - int(math.log10(temp) + 1)))\n else:\n temp = 7\n temp = f\"{self.value:,.{temp}f}\"\n # If there are trailing zeros after a decimal, strip them\n if \".\" in temp:\n temp = temp.rstrip(\"0\").rstrip(\".\")\n\n if mod is None:\n # No modifier for this number, just return the number\n return temp\n else:\n if mod.value == Token.UNPRINTABLE + \"date\":\n # This is a note to how the dates are parsed, so ignore it\n return temp\n if mod.get_type() == \"currency\" and Modifier.get_normalized(mod.value) == \"USD\":\n # Special case USD, add the $ symbol at the front\n return f\"${temp}\"\n elif mod.get_type() == \"currency\" and Modifier.get_normalized(mod.value) == \"Euro\":\n # Special case EUR, add the Euro symbol at the front\n return f\"\\u20AC{temp}\"\n elif mod.get_type() == \"currency\" and Modifier.get_normalized(mod.value) == \"Japanese Yen\":\n # Special case JPY, add the Yen symbol at the front\n return f\"\\u00A5{temp}\"\n elif mod.get_type() == \"currency\" and Modifier.get_normalized(mod.value) == \"Pound Sterling\":\n # Special case GBP, add the Pound symbol at the front\n return f\"\\u00A3{temp}\"\n elif mod.add_space():\n # This modifier wants a space after the number, so give it one\n return f\"{temp} {mod.value}\"\n else:\n # Otherwise, just reutnr the number next to the modifier\n return f\"{temp}{mod.value}\"\n\n def clone(self):\n return Value(\n self.value, \n None if self.modifier is None else self.modifier.clone(),\n )\n","repo_name":"seligman/human_calc","sub_path":"calc/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"12018111545","text":"#\n# 代码中的类名、方法名、参数名已经指定,请勿修改,直接返回方法规定的值即可\n#\n# \n# @param height int整型一维数组 \n# @return int整型\n#\nclass Solution:\n def maxArea(self , height: List[int]) -> int:\n # write code here\n if len(height)<2:\n return 0\n l=0\n r=len(height)-1\n max_=0\n while r>l:\n area = min(height[l],height[r])*(r-l)\n max_ = max(area,max_)\n if height[l]>height[r]:\n r=r-1\n else:\n l=l+1\n return max_\n \n\n 
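# editor's note (added annotation): classic two-pointer scan, O(n) time and
# O(1) extra space; the pointer at the shorter line moves inward because the
# area is limited by the shorter of the two heights.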
","repo_name":"water-lib/My_Leecode","sub_path":"双指针/盛水容器最多.py","file_name":"盛水容器最多.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73643703822","text":"\"\"\"This module contains the TrafficSignal class, which represents a traffic signal in the simulation.\"\"\"\nimport os\nimport sys\nfrom typing import Callable, List, Union\n\n\nif \"SUMO_HOME\" in os.environ:\n tools = os.path.join(os.environ[\"SUMO_HOME\"], \"tools\")\n sys.path.append(tools)\nelse:\n raise ImportError(\"Please declare the environment variable 'SUMO_HOME'\")\nfrom typing import Union\n\nimport numpy as np\nfrom gymnasium import spaces\nfrom sumolib.net import Phase\n\n\nclass AbstractTrafficLightController:\n def __init__(self, sumo):\n \"\"\"Initialization of the controller\"\"\"\n pass\n\n def apply_action(self):\n \"\"\"Should set new phase/set new program\"\"\"\n pass\n\n def is_time_to_act(self):\n \"\"\"Boolean property/method which indicates if it is time to do something\"\"\"\n pass\n\n def update(self):\n \"\"\"Optional function that should update internal state\"\"\"\n pass\n\n\nclass TrafficLightsFixedCycleController:\n \"\"\"This class represents a Traffic Signal controlling an intersection.\n\n It is responsible for retrieving information and changing the traffic phase using the Traci API.\n\n IMPORTANT: It assumes that the traffic phases defined in the .net file are of the form:\n [green_phase, yellow_pactionhase, green_phase, yellow_phase, ...]\n Currently it is not supporting all-red phases (but should be easy to implement it).\n\n # Observation Space\n The default observation for each traffic signal agent is a vector:\n\n obs = [phase_one_hot, min_green, lane_1_density,...,lane_n_density, lane_1_queue,...,lane_n_queue]\n\n - ```phase_one_hot``` is a one-hot encoded vector indicating the current active green phase\n - ```min_green``` is a binary variable indicating whether min_green seconds have already passed in the current phase\n - ```lane_i_density``` is the number of vehicles in incoming lane i dividided by the total capacity of the lane\n - ```lane_i_queue``` is the number of queued (speed below 0.1 m/s) vehicles in incoming lane i divided by the total capacity of the lane\n\n You can change the observation space by implementing a custom observation class. See :py:class:`sumo_framework.environment.observations.ObservationFunction`.\n\n # Action Space\n Action space is discrete, corresponding to which green phase is going to be open for the next delta_time seconds.\n\n # Reward Function\n The default reward function is 'diff-waiting-time'. 
You can change the reward function by implementing a custom reward function and passing to the constructor of :py:class:`sumo_framework.environment.env.SumoEnvironment`.\n \"\"\"\n\n def __init__(\n self,\n trafficlight,\n tls_id: str,\n logic_id: Union[str, None] = None,\n delta_time: int = 5, ## ??\n min_green: Union[int, None] = None,\n max_green: Union[int, None] = None,\n force_min_max_duration: bool = False,\n ):\n \"\"\"Initializes a TrafficSignal object.\n\n Args:\n env (SumoEnvironment): The environment this traffic signal belongs to.\n ts_id (str): The id of the traffic signal.\n delta_time (int): The time in seconds between actions.\n min_green (int): The minimum time in seconds of the green phase.\n max_green (int): The maximum time in seconds of the green phase.\n begin_time (int): The time in seconds when the traffic signal starts operating.\n sumo (Sumo): The Sumo instance.\n \"\"\"\n self.tls_id = tls_id\n self.delta_time = delta_time\n # self.default_yellow_time = default_yellow_time\n\n self.min_green = min_green\n self.max_green = max_green\n\n self.force_min_max_dur = force_min_max_duration\n self.trafficlight = trafficlight\n\n self.is_phase_green = lambda phase: (\"g\" in phase.state or \"G\" in phase.state) and (\"y\" not in phase.state)\n self._build_phases()\n\n low = np.zeros(len(self.phase_id2action_id), dtype=np.int32)\n high = np.zeros_like(low)\n\n for phase_id, action_id in self.phase_id2action_id.items():\n phase = self.program.phases[phase_id]\n l, h = self.min_green, self.max_green\n\n if not self.force_min_max_dur and (phase.minDur != phase.maxDur):\n l = max(l, phase.minDur)\n h = min(h, phase.maxDur)\n\n low[action_id] = l\n high[action_id] = h\n self.action_space = spaces.Box(low=low, high=high, dtype=np.int32)\n\n def _build_phases(self, logic_id: str = None): # TODO: rewrite to be compitible use predefined programs\n if not logic_id:\n logic_id = self.trafficlight.getProgram(self.tls_id)\n\n logic = [l for l in self.trafficlight.getAllProgramLogics(self.tls_id) if l.programID == logic_id][0]\n\n self.program = logic\n self.phases = logic.phases\n\n self.phase_id2action_id = {}\n for i, phase in enumerate(self.phases):\n if self.is_phase_green(phase):\n self.phase_id2action_id[i] = len(self.phase_id2action_id)\n\n def is_time_to_act(self):\n return self.trafficlight.getPhase(self.tls_id) == self.act_phase and \\\n self.trafficlight.getNextSwitch(self.tls_id) > self.switch_time\n\n def apply_action(self, action):\n \"\"\"Application of the new green phases durations.\n\n Args:\n action (array[int]): green phases durations [d1, d2, ...]\n \"\"\"\n new_phases = []\n for ph_i, phase in enumerate(self.phases):\n if ph_i in self.phase_id2action_id:\n ac_i = self.phase_id2action_id[ph_i]\n new_phase = self.trafficlight.Phase(float(action[ac_i]), phase.state, -1.0, -1.0, (), phase.name)\n\n else:\n new_phase = phase\n new_phases.append(new_phase)\n\n new_phases = tuple(new_phases)\n phase_index = self.trafficlight.getPhase(self.tls_id)\n\n self.trafficlight.setProgramLogic(self.tls_id, self.trafficlight.Logic(\"var\", 0, phase_index, new_phases))\n self.trafficlight.setProgram(self.tls_id, \"var\")\n\n current_duration = new_phases[phase_index].duration\n self.trafficlight.setPhaseDuration(self.tls_id, current_duration)\n\n\nclass TrafficRealTimeController:\n \"\"\"This class represents a Traffic Signal controlling an intersection.\n\n It is responsible for retrieving information and changing the traffic phase using the Traci API.\n\n IMPORTANT: It assumes 
that the traffic phases defined in the .net file are of the form:\n [green_phase, yellow_phase, green_phase, yellow_phase, ...]\n Currently it is not supporting all-red phases (but should be easy to implement it).\n\n # Observation Space\n The default observation for each traffic signal agent is a vector:\n\n obs = [phase_one_hot, min_green, lane_1_density,...,lane_n_density, lane_1_queue,...,lane_n_queue]\n\n - ```phase_one_hot``` is a one-hot encoded vector indicating the current active green phase\n - ```min_green``` is a binary variable indicating whether min_green seconds have already passed in the current phase\n - ```lane_i_density``` is the number of vehicles in incoming lane i dividided by the total capacity of the lane\n - ```lane_i_queue``` is the number of queued (speed below 0.1 m/s) vehicles in incoming lane i divided by the total capacity of the lane\n\n You can change the observation space by implementing a custom observation class. See :py:class:`sumo_framework.environment.observations.ObservationFunction`.\n\n # Action Space\n Action space is discrete, corresponding to which green phase is going to be open for the next delta_time seconds.\n\n # Reward Function\n The default reward function is 'diff-waiting-time'. You can change the reward function by implementing a custom reward function and passing to the constructor of :py:class:`sumo_framework.environment.env.SumoEnvironment`.\n \"\"\"\n\n def __init__(\n self,\n env,\n ts_id: str,\n delta_time: int,\n yellow_time: int,\n min_green: int,\n max_green: int,\n begin_time: int,\n sumo,\n cyclic_mode: bool = False,\n ):\n \"\"\"Initializes a TrafficSignal object.\n\n Args:\n env (SumoEnvironment): The environment this traffic signal belongs to.\n ts_id (str): The id of the traffic signal.\n delta_time (int): The time in seconds between actions.\n yellow_time (int): The time in seconds of the yellow phase.\n min_green (int): The minimum time in seconds of the green phase.\n max_green (int): The maximum time in seconds of the green phase.\n begin_time (int): The time in seconds when the traffic signal starts operating.\n reward_fn (Union[str, Callable]): The reward function. 
Can be a string with the name of the reward function or a callable function.\n sumo (Sumo): The Sumo instance.\n cyclic_mode (bool): if True just two actions allowed: switch to next phase or not\n \"\"\"\n self.id = ts_id\n self.env = env\n self.delta_time = delta_time\n\n self.yellow_time = yellow_time\n self.min_green = min_green\n self.max_green = max_green\n\n self.green_phase = 0\n self.is_yellow = False\n self.time_since_last_phase_change = 0\n\n self.next_action_time = begin_time\n self.sumo = sumo\n\n self.is_phase_green = lambda phase: (\"g\" in phase.state or \"G\" in phase.state) and (\"y\" not in phase.state)\n self._build_phases()\n # self.lanes = list(\n # dict.fromkeys(self.sumo.trafficlight.getControlledLanes(self.id))\n # ) # Remove duplicates and keep order\n\n # self.out_lanes = [link[0][1] for link in self.sumo.trafficlight.getControlledLinks(self.id) if link]\n # self.out_lanes = list(set(self.out_lanes))\n # self.lanes_lenght = {lane: self.sumo.lane.getLength(lane) for lane in self.lanes + self.out_lanes}\n\n # self.observation_space = self.observation_fn.observation_space()\n\n self.action_space = spaces.Discrete(self.num_green_phases)\n self.cyclic_mode = cyclic_mode\n\n if self.cyclic_mode:\n self.action_space = spaces.Discrete(2)\n\n def _build_phases(self):\n logic_id = self.sumo.trafficlight.getProgram(self.id)\n logic = [l for l in self.sumo.trafficlight.getAllProgramLogics(self.id) if l.programID == logic_id][0]\n\n phases = logic.phases\n self.phases = logic.phases\n self.green_phases = []\n\n for phase in phases:\n state = phase.state\n if self.is_phase_green(phase):\n self.green_phases.append(self.sumo.trafficlight.Phase(phase.duration, state)) # maybe phase.min?\n\n self.num_green_phases = len(self.green_phases)\n\n self.all_phases = self.green_phases.copy()\n self.yellow_dict = {}\n\n for i, p1 in enumerate(self.green_phases):\n for j, p2 in enumerate(self.green_phases):\n if i == j:\n continue\n yellow_state = \"\"\n for s in range(len(p1.state)):\n if (p1.state[s] == \"G\" or p1.state[s] == \"g\") and (p2.state[s] == \"r\" or p2.state[s] == \"s\"):\n yellow_state += \"y\"\n else:\n yellow_state += p1.state[s]\n self.yellow_dict[(i, j)] = len(self.all_phases)\n self.all_phases.append(self.sumo.trafficlight.Phase(self.yellow_time, yellow_state))\n\n if self.env.fixed_ts:\n return\n logic.phases = self.all_phases\n\n self.sumo.trafficlight.setProgramLogic(self.id, logic)\n self.sumo.trafficlight.setRedYellowGreenState(self.id, self.all_phases[0].state)\n\n @property\n def time_to_act(self):\n \"\"\"Returns True if the traffic signal should act in the current step.\"\"\"\n return self.next_action_time == self.env.sim_step\n\n def update(self):\n \"\"\"Updates the traffic signal state.\n\n If the traffic signal should act, it will set the next green phase and update the next action time.\n \"\"\"\n self.time_since_last_phase_change += 1\n if self.is_yellow and self.time_since_last_phase_change == self.yellow_time:\n # self.sumo.trafficlight.setPhase(self.id, self.green_phase)\n self.sumo.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)\n self.is_yellow = False\n\n return self.time_to_act\n\n def apply_action(self, new_phase: int):\n \"\"\"Sets what will be the next green phase and sets yellow phase if the next phase is different than the current.\n\n Args:\n new_phase (int): Number between [0 ... 
num_green_phases]\n \"\"\"\n new_phase = int(new_phase)\n if self.cyclic_mode:\n new_phase += self.green_phase\n new_phase = new_phase % len(self.green_phases)\n\n if (new_phase == self.green_phase) and (self.time_since_last_phase_change - self.yellow_time > self.max_green):\n new_phase += 1\n new_phase = new_phase % len(self.green_phases)\n\n if self.green_phase == new_phase or self.time_since_last_phase_change < self.yellow_time + self.min_green:\n # self.sumo.trafficlight.setPhase(self.id, self.green_phase)\n self.sumo.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)\n self.next_action_time = self.env.sim_step + self.delta_time\n else:\n # self.sumo.trafficlight.setPhase(self.id, self.yellow_dict[(self.green_phase, new_phase)]) # turns yellow\n self.sumo.trafficlight.setRedYellowGreenState(\n self.id, self.all_phases[self.yellow_dict[(self.green_phase, new_phase)]].state\n )\n self.green_phase = new_phase\n self.next_action_time = self.env.sim_step + self.delta_time\n self.is_yellow = True\n self.time_since_last_phase_change = 0\n\n def default_action(self):\n self.next_action_time = self.env.sim_step + self.delta_time\n","repo_name":"sokratmillman/sumo-framework","sub_path":"sumo_framework/environment/traffic_controller.py","file_name":"traffic_controller.py","file_ext":"py","file_size_in_byte":14187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14138189173","text":"#Round Robin program using array\n\n\nprint(\"how many process you want to enter\")\n\nx=int(input())\nfinal=[]\nat=[0]*x\nbt=[0]*x\nrt=[0]*x\ncpu_cycle=0\ntotal_waiting=0\nflag=False\ncount=0\nremain=x\nquantum=int(input(\"time slice for round robin\"))\nturn_around=0\ntotal_waiting=0\nfor i in range(0,x):\n\tat[i]=int(input(\"enter arival time for p[\"+str(i)+\"]\"))\n\tbt[i]=int(input(\"enter burst time for p[\"+str(i)+\"]\"))\n\trt[i]=bt[i]\nprint('\\n\\n gannt chart')\n\nwhile remain!=0:\n\t#print(cpu_cycle,count)\n\tif at[count]<=cpu_cycle and rt[count]>0 and rt[count]<=quantum :\n\t\tcpu_cycle=cpu_cycle+rt[count];\n\t\trt[count]=0;\n\t\tflag=True;\n\t\tprint('p['+str(count)+']|'+str(cpu_cycle)+'|',end='')\n\telif rt[count]>quantum and at[count]<=cpu_cycle:\n\t\trt[count]=rt[count]-quantum\n\t\tcpu_cycle=cpu_cycle+quantum\n\t\tprint('p['+str(count)+']|'+str(cpu_cycle)+'|',end='')\n\t\n\tif flag is True:\n\t\tfinal.append('P['+str(count)+']\\t'+str(cpu_cycle-at[count])+'\\t'+str(cpu_cycle-at[count]-bt[count])+'\\t')\n\t\tturn_around=turn_around+cpu_cycle-at[count]\n\t\ttotal_waiting=total_waiting+cpu_cycle-at[count]-bt[count]\n\t\tremain=remain-1\n\t\tflag=False\n\tcount=count+1\n\tif count==x :\n\t\tcount=0\n\nprint('',end='\\n')\nfinal.append('\\navg:\\t'+str(turn_around/x)+'\\t'+str(total_waiting/x)+'\\t')\n\nfor i in final:\n\tprint(i)\n\n","repo_name":"nilesharv/schduling_algo","sub_path":"roundrobin.py","file_name":"roundrobin.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13203611","text":"# Find sum of a list with sum function and without function\n\n\na = [2,3,4,5,6,7,8,9,4,5,1,3]\nprint(sum(a))\nb = 0\n\nfor i in a:\n b =b+i\n \nprint(b)","repo_name":"95rehan/python","sub_path":"Python interview prepration/sum_of_a_list.py","file_name":"sum_of_a_list.py","file_ext":"py","file_size_in_byte":149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
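Editor's note on the roundrobin.py record above: it interleaves Gantt-chart printing with the turnaround/waiting bookkeeping, which makes the quantum logic hard to follow. Below is a minimal, self-contained sketch of quantum-based round-robin scheduling for reference. It is an illustrative reimplementation, not taken from any record in this file; the function name and the admit helper are hypothetical.

from collections import deque

def round_robin(arrival, burst, quantum):
    """Simulate round-robin; return per-process (turnaround, waiting) lists."""
    n = len(burst)
    remaining = list(burst)
    finish = [0] * n
    queued = [False] * n
    ready = deque()
    clock = min(arrival)

    def admit(now):
        # move every process that has arrived by `now` into the ready queue
        for i in range(n):
            if not queued[i] and arrival[i] <= now:
                ready.append(i)
                queued[i] = True

    admit(clock)
    while any(r > 0 for r in remaining):
        if not ready:
            # CPU is idle: jump to the next arrival among unfinished processes
            clock = min(arrival[i] for i in range(n)
                        if remaining[i] > 0 and not queued[i])
            admit(clock)
            continue
        i = ready.popleft()
        run = min(quantum, remaining[i])
        clock += run
        remaining[i] -= run
        admit(clock)  # processes arriving during this slice are queued first
        if remaining[i] > 0:
            ready.append(i)  # preempted: back to the end of the queue
        else:
            finish[i] = clock
    turnaround = [finish[i] - arrival[i] for i in range(n)]
    waiting = [turnaround[i] - burst[i] for i in range(n)]
    return turnaround, waiting

For example, round_robin([0, 0, 0], [5, 3, 8], 2) gives turnaround times [12, 9, 16] and waiting times [7, 6, 8], matching a hand-drawn Gantt chart with quantum 2.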
+{"seq_id":"4216522350","text":"import matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nimport numpy as np\nimport pandas as pd\nimport math\nfrom sklearn.metrics import r2_score\nimport seaborn as sns\nimport matplotlib as mpl\nfrom numpy import polyfit, poly1d\nmpl.rcParams['font.sans-serif'] = ['KaiTi']\nmpl.rcParams['font.serif'] = ['KaiTi'] # 解决matplotlib无法显示中文的问题\nplt.rcParams['axes.unicode_minus'] = False # 解决负数坐标显示问题\nplt.rcParams['font.sans-serif'] = 'Times New Roman'\ndef result(pred,real):\n # mape\n mape=np.mean(np.abs((pred-real)/real))\n # rmse\n rmse=np.sqrt(np.mean(np.square(pred-real)))\n # mae\n mae=np.mean(np.abs(pred-real))\n # R2\n r2=r2_score(real,pred)\n\n ave_real=np.sum(real)\n # fenzi=np.sum(math.pow(real-pred,2))\n # fenmu=np.sum(math.pow(real-ave_real,2))\n # nse=1-fenzi/fenmu\n ave_real=np.mean(real)\n fenzi=np.sum(np.square(pred-real))\n fenmu=np.sum(np.square(real - ave_real))\n nse=1-fenzi/fenmu\n\n sse=np.sum(np.square(pred-real))\n return mape,rmse,mae,r2,nse,sse\n\n\ndef R(pred,real):\n r=np.corrcoef(pred,real)[0,1]\n return r\n\nalnbsa_true=loadmat(r'.\\result\\ALN-LSTM2(r).mat')['true']\nalnbsa_pred=loadmat(r'.\\result\\ALN-LSTM2(r).mat')['pred']\n\n\n\nalnbsa_mape1,alnbsar_mse1,alnbsa_mae1,alnbsa_r21,alnbsa_nse1,alnbsa_sse1=result(alnbsa_pred,alnbsa_true)\n\nprint('alnbsa的mape:',alnbsa_mape1,' rmse:',alnbsar_mse1,' mae:',alnbsa_mae1,' R2:',alnbsa_r21,' NSE:',alnbsa_nse1,' SSE:',alnbsa_sse1)\n\nxx=alnbsa_true.flatten()\ncoeff=polyfit(xx,alnbsa_pred,1)\nprint(\"coeff:\",coeff)\n\nr=R(alnbsa_pred,alnbsa_true)\nstd=np.std(alnbsa_pred,ddof=1)\nprint(\"std:\",std, 'R:',coeff)\n\n\nplt.figure(figsize=(8,6))\n# plt.xlabel(\"Raw data\",fontsize=14)\n# plt.ylabel(\"Predictive value\",fontsize=14)\nplt.xlabel(\"Raw remainder data(mm)\",fontsize=14)\nplt.ylabel(\"Predicted remainder value(mm)\",fontsize=14)\nplt.scatter(alnbsa_true,alnbsa_pred,c='r',marker='.',alpha=0.4,label='Sample data')\nplt.plot(xx,coeff[0]*xx+coeff[1],color='blue',label='Fit curve')\nplt.title(\"Correlation coefficient: R=0.5243\",fontsize=14)\nplt.legend(loc='upper left', fontsize=13)\nplt.savefig('.\\picture\\corr(r).png',dpi=1000,bbox_inches = 'tight')\nplt.show()\n","repo_name":"mxylovewyh/STL-ALNBSA-LSTM","sub_path":"correlation(r).py","file_name":"correlation(r).py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8193633633","text":"import os\nfrom dotenv import load_dotenv, dotenv_values\n\n# access envi vars by using the os.environ\n# print(os.environ)\n\n# access an env var - raise an exception if the key does not exist\nprint(os.environ[\"USER\"])\n\n# get() method is best practice\nprint(os.environ.get(\"DATABASE_URL\"))\n\n# add default value for missing var that is not None\ndatabase_url = os.environ.get(\"DATABASE_URL\", \"sqlite:///\")\n\n# getenv() === os.environ.get()\nuser = os.getenv(\"USER\")\ndatabase_url = os.getenv('DATABASE_URL', 'sqlite://') # set default for missing vars\n\nconfig = dotenv_values('.env.staging')\nprint(config['BASE_URL'])\n\n\nprint(os.getenv(\"BASE_URL\"))\n\n\n\n","repo_name":"ntbhoang/concepts-code-examples","sub_path":"src/configuration_files/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36856022023","text":"from django.test import TestCase\nfrom django.test import Client\nfrom accounts.models 
import User\nfrom rest_framework.authtoken.models import Token\n# Create your tests here.\n\n\nclass test_view(TestCase):\n\n    def setUp(self) -> None:\n        u = User(username='test')\n        u.set_password('test')\n        u.save()\n        self.user = u\n\n        self.c = Client()\n\n    def test_func(self):\n        token = Token.objects.create(user=self.user)\n        token.save()\n        auth_headers = {\n            'HTTP_AUTHORIZATION': token.key,\n        }\n        response = self.c.post('/categories', {\"cate\": \"test\"}, **auth_headers)\n        # print(\"response.status_code:\", response.status_code)\n        self.assertEqual(response.status_code, 200)\n\n        response = self.c.get('/categories', **auth_headers)\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(response.data[0]['name'], 'test')\n\n        # rename the category\n        put_del_url = '/category/' + str(response.data[0]['id'])\n        response = self.c.put(put_del_url, data={\n            \"cate_name\": \"change_test\"}, content_type=\"application/json\", **auth_headers)\n        self.assertEqual(response.status_code, 200)\n\n        response = self.c.get('/categories', **auth_headers)\n        self.assertEqual(response.data[0]['name'], 'change_test')\n\n        response = self.c.delete(put_del_url, **auth_headers)\n        self.assertEqual(response.status_code, 200)\n\n        response = self.c.get('/categories', **auth_headers)\n        self.assertEqual(len(response.data), 0)\n","repo_name":"zzsealy/my-site","sub_path":"backend/backend/apps/blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"40333936558","text":"# This bolt, SentimentAnalysisBolt.py, analyzes the tweet text sentiment\n# using the VADER sentiment analysis tool, which ships with NLTK.\n\n\nfrom pystorm.bolt import Bolt\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nclass SentimentAnalysisBolt(Bolt):\n\tdef process(self, tup):\n\n\t\t# extract the sentence\n\t\tsentence = tup.values[0] \n\n\t\tsid = SentimentIntensityAnalyzer()\n\t\tss = sid.polarity_scores(sentence)\n\t\ttuple_result = (str(ss['neg']),str(ss['pos']),str(ss['neu']))\n\t\tself.emit(tuple_result)\n\t\t\n\nSentimentAnalysisBolt().run()\n\n\n\n\n","repo_name":"yahiaMI/Storm","sub_path":"TwitterTopology/multilang/resources/SentimentAnalysisBolt.py","file_name":"SentimentAnalysisBolt.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"71573253902","text":"#!/usr/bin/env python\n__author__ = 'mahajrod'\nimport os\nimport argparse\nfrom Bio import SeqIO\nfrom RouToolPa.Routines.Sequence import rev_com_generator\n\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-i\", \"--input\", action=\"store\", dest=\"input\",\n                    help=\"fasta file with sequences\")\nparser.add_argument(\"-w\", \"--write_original\", action=\"store_true\", dest=\"write_original\", default=False,\n                    help=\"Write original records\")\nparser.add_argument(\"-o\", \"--output\", action=\"store\", dest=\"output\",\n                    help=\"fasta file with reverse complement sequences\")\n\nargs = parser.parse_args()\n\nrecord_dict = SeqIO.index_db(\"temp_index.idx\", [args.input], format=\"fasta\")\n\nSeqIO.write(rev_com_generator(record_dict, yield_original_record=args.write_original), args.output, 
\"fasta\")\n\nos.remove(\"temp_index.idx\")\n","repo_name":"mahajrod/MAVR","sub_path":"scripts/sequence/old/reverse_complement.py","file_name":"reverse_complement.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"9388795256","text":"import os, sys, unittest, shutil\nfrom collections import Counter\n\ndir = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(dir + '/../..')\n\nimport train, train_glove\n\nclass Namespace:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n \ntextfile = dir + '/fixture/lorem_ipsum.txt'\n\nclass TestUtil(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n args = Namespace(\n debug=False,\n random_search=False,\n textfile=textfile,\n embedding_size=3,\n context_size=4,\n num_epochs=2, \n nb_search_iter=1,\n stem=True\n )\n log_dirs = train_glove.main(args)\n cls.tmp_glove_dir = log_dirs[0]\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.tmp_glove_dir)\n\n def test_main(self):\n args = Namespace(\n debug=False,\n profiling=False,\n num_epochs=2, \n batch_size=2,\n lr=1e-3, \n\n textfile= textfile,\n glove_dir=self.tmp_glove_dir,\n train_glove=True,\n \n cell_name='lstm',\n rnn_activation='tanh',\n seq_length=4,\n state_size=4,\n num_layers=1,\n tye_embedding=False\n )\n log_dir = train.main(args)\n shutil.rmtree(log_dir)\n\n def test_main_tye_embedding(self):\n args = Namespace(\n debug=False,\n profiling=False,\n num_epochs=2, \n batch_size=2,\n lr=1e-3, \n\n textfile= textfile,\n glove_dir=self.tmp_glove_dir,\n train_glove=True,\n \n cell_name='lstm',\n rnn_activation='tanh',\n seq_length=4,\n state_size=4,\n num_layers=1,\n tye_embedding=True\n )\n log_dir = train.main(args)\n shutil.rmtree(log_dir)\n\n def test_main_multiple_layers(self):\n args = Namespace(\n debug=False,\n profiling=False,\n num_epochs=2, \n batch_size=2,\n lr=1e-3, \n\n textfile= textfile,\n glove_dir=self.tmp_glove_dir,\n train_glove=True,\n \n cell_name='lstm',\n rnn_activation='tanh',\n seq_length=4,\n state_size=4,\n num_layers=3,\n tye_embedding=False\n )\n log_dir = train.main(args)\n shutil.rmtree(log_dir)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"metaflow-ai/BoobaBot","sub_path":"tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"11241696764","text":"import pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom pytorch_lightning.metrics.functional.classification import auroc\nfrom transformers import AdamW, get_linear_schedule_with_warmup, BertModel\n\nfrom dataset import ToxicCommentsDataset\nfrom config import BERT_MODEL, LABEL_COLUMNS, df, TOKENIZER, BATCH_SIZE, N_EPOCHS\n\n\"\"\"\n : Lightning DataModule\n\"\"\"\n\n\nclass ToxicCommentsDataModule(pl.LightningDataModule):\n def __init__(self, train_df, val_df, tokenizer, max_length=128, batch_size=8):\n super(ToxicCommentsDataModule, self).__init__()\n self.train_df = train_df\n self.val_df = val_df\n self.tokenizer = tokenizer\n self.max_len = max_length\n self.batch_size = batch_size\n\n def setup(self):\n self.train_dataset = ToxicCommentsDataset(\n self.train_df, self.tokenizer, self.max_len\n )\n\n self.val_dataset = ToxicCommentsDataset(\n self.train_df, self.tokenizer, 
self.max_len\n        )\n\n    def train_dataloader(self):\n        return DataLoader(\n            self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=4\n        )\n\n    def val_dataloader(self):\n        return DataLoader(self.val_dataset, batch_size=1, shuffle=False, num_workers=4)\n\n    def test_dataloader(self):\n        return DataLoader(self.val_dataset, batch_size=1, shuffle=True, num_workers=4)\n\n\n# initiate the setup in the datamodule\ntrain_df, val_df = train_test_split(df, test_size=0.05)\ntrain_toxic = train_df[train_df[LABEL_COLUMNS].sum(axis=1) > 0]\ntrain_clean = train_df[train_df[LABEL_COLUMNS].sum(axis=1) == 0]\ntrain_df = pd.concat([train_toxic, train_clean.sample(15_000)])\ndatamodule = ToxicCommentsDataModule(\n    train_df, val_df, TOKENIZER, max_length=128, batch_size=BATCH_SIZE\n)\ndatamodule.setup()\n\n\n\"\"\"\n : Lightning Module\n\"\"\"\n\n\nclass ToxicCommentClassifier(pl.LightningModule):\n    def __init__(self, n_classes, steps_per_epoch, n_epochs):\n        super(ToxicCommentClassifier, self).__init__()\n        self.bert = BertModel.from_pretrained(BERT_MODEL)\n        # bug fix: the classifier head must be a linear layer, not a bare tuple\n        self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)\n        self.steps_per_epoch = steps_per_epoch\n        self.n_epochs = n_epochs\n        self.criterion = nn.BCELoss()\n\n    def forward(self, input_ids, attention_mask, labels=None):\n        output = self.bert(input_ids, attention_mask=attention_mask)\n        output = self.classifier(output.pooler_output)\n        output = torch.sigmoid(output)\n        loss = 0\n        if labels is not None:\n            loss = self.criterion(output, labels)\n            return loss, output\n        return output\n\n    def training_step(self, batch, batch_idx):\n        input_ids = batch[\"input_ids\"]\n        attention_mask = batch[\"attention_mask\"]\n        labels = batch[\"labels\"]\n        loss, output = self(input_ids, attention_mask, labels)\n        self.log(\"train_loss\", loss, prog_bar=True, logger=True)\n        return {\"loss\": loss, \"predictions\": output, \"labels\": labels}\n\n    def validation_step(self, batch, batch_idx):\n        input_ids = batch[\"input_ids\"]\n        attention_mask = batch[\"attention_mask\"]\n        labels = batch[\"labels\"]\n        loss, output = self(input_ids, attention_mask, labels)\n        self.log(\"val_loss\", loss, prog_bar=True, logger=True)\n        return loss\n\n    def test_step(self, batch, batch_idx):\n        input_ids = batch[\"input_ids\"]\n        attention_mask = batch[\"attention_mask\"]\n        labels = batch[\"labels\"]\n        loss, output = self(input_ids, attention_mask, labels)\n        self.log(\"test_loss\", loss, prog_bar=True, logger=True)\n        return loss\n\n    def training_epoch_end(self, outputs):\n        labels = []\n        predictions = []\n\n        for output in outputs:\n            for out_labels in output[\"labels\"].detach().cpu():\n                labels.append(out_labels)\n\n        for output in outputs:\n            for out_preds in output[\"predictions\"].detach().cpu():\n                predictions.append(out_preds)\n\n        labels = torch.stack(labels)\n        predictions = torch.stack(predictions)\n\n        for i, name in enumerate(LABEL_COLUMNS):\n            roc_score = auroc(predictions[:, i], labels[:, i])\n            self.logger.experiment.add_scalar(\n                f\"{name}_roc_auc/Train\", roc_score, self.current_epoch\n            )\n\n    def configure_optimizers(self):\n        optimizer = AdamW(self.parameters(), lr=2e-5)\n        warmup_steps = self.steps_per_epoch // 3\n        total_steps = self.steps_per_epoch * self.n_epochs - warmup_steps\n\n        scheduler = get_linear_schedule_with_warmup(\n            optimizer, warmup_steps, total_steps\n        )\n\n        return [optimizer], [scheduler]\n\n\n# MODEL instantiated\n\nmodel = ToxicCommentClassifier(\n    n_classes=6, steps_per_epoch=len(train_df) // BATCH_SIZE, n_epochs=N_EPOCHS\n)\ntrainer = pl.Trainer(max_epochs=N_EPOCHS, gpus=1, 
progress_bar_refresh_rate=20)\ntrainer.fit(model, datamodule)\n","repo_name":"vrahul1997/pytorch_lightning_multilabel_text_toxic_cmnt_classification","sub_path":"src/toxic_lightning.py","file_name":"toxic_lightning.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27996455916","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# рунге-кут 4 порядка\ndef func(x):\n # return math.cos(x)\n # return -x * x\n return 1/(1+(x*x))\n\n\n# middle quads\ndef riemann_sum(f, a, b, N):\n dx = (b - a)/N\n x = np.linspace(a, b, N+1)\n x_mid = (x[:-1] + x[1:])/2\n sum = 0\n for i in x_mid:\n sum += f(i)\n return sum*dx\n\n\n# trapezoidal rule (заменяем интервал на простейший многочлен)\ndef trap(f, a, b, n):\n g = 0\n if b > a:\n h = (b-a)/float(n)\n else:\n h = (a-b)/float(n)\n for i in range(0, n):\n k = 0.5 * h * (f(a + i*h) + f(a + (i+1)*h))\n g = g + k\n\n return g\n\n\ndef simpson(f, a, b, n):\n h = (b-a)/n\n k = 0.0\n x = a + h\n for i in range(1, int(n/2) + 1):\n k += 4*f(x)\n x += 2*h\n x = a + 2*h\n for i in range(1, int(n/2)):\n k += 2*f(x)\n x += 2*h\n\n return (h/3)*(f(a)+f(b)+k)\n\n\na = 0\nb = 2\ne = 1e-5\n# automatic find optimal n\nfuncArray = [riemann_sum, trap, simpson]\nnameArray = [\"quad\", \"trapezoidal\", \"simpson\"]\nresultArray = []\nnArray = 0\n\nfor f in funcArray:\n n = 4\n resLast = f(func, a, b, int(n/2))\n resCurr = f(func, a, b, n)\n while abs(resLast - resCurr) > e:\n n *= 2\n resLast = resCurr\n resCurr = f(func, a, b, n)\n print(\"For\", nameArray[nArray], \"rule:\")\n print(\"n =\", n, \", result:\", resCurr)\n resultArray.append(resCurr)\n nArray += 1\n\nprint(\"Diff quad and trapezoidal\", abs(resultArray[1]- resultArray[0]))\nprint(\"Diff Simpson and trapezoidal\", abs(resultArray[2]- resultArray[1]))\nx = np.arange(a, b, abs((b-a)/n))\nx = np.append(x, b)\ny = []\nfor i in x:\n y.append(func(i))\nplt.plot(x, y, c=\"black\")\nplt.plot(x, np.zeros(len(x)))\nplt.show()\n","repo_name":"Uniquenik/numeric-methods","sub_path":"integrals/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23106054828","text":"from options.function_types import FunctionType\nfrom options.replacement_types import ReplacementType\nfrom options.selection_type import SelectionType\nfrom utils.opt_parser import OptConfig\n\n\ndef get_opt_config():\n return [\n OptConfig(\"sel_type\", \"S\", str, \"roulette\", lambda s: validate_value_in_enum(s, SelectionType)),\n OptConfig(\"rep_type\", \"r\", str, \"generation+\", lambda s: validate_value_in_enum(s, ReplacementType)),\n OptConfig(\"iterations\", \"i\", int, 100, lambda x: 10 <= x <= 5000),\n OptConfig(\"function\", \"f\", str, \"cigar\", lambda s: validate_value_in_enum(s, FunctionType)),\n OptConfig(\"dimensions\", \"d\", int, 2, lambda x: 2 <= x <= 10),\n OptConfig(\"crossover_p\", \"C\", float, 0.5, lambda x: 0.01 <= x <= 1.0),\n OptConfig(\"cardinality\", \"n\", int, 200, lambda x: 50 <= x <= 1000 and x % 2 == 0),\n OptConfig(\"attempts\", \"a\", int, 1, lambda x: 1 <= x <= 50),\n OptConfig(\"mut_sigma\", \"s\", float, 5, lambda x: 0 <= x <= 100),\n OptConfig(\"x_min\", \"m\", float, -100, lambda x: -100 <= x <= 0),\n OptConfig(\"x_max\", \"M\", float, 100, lambda x: 0 <= x <= 100)\n ]\n\n\ndef validate_value_in_enum(value, enum):\n return value in map(lambda e: e.value, 
list(enum))\n","repo_name":"BYEDUCK/pszt-1","sub_path":"options/opt_config.py","file_name":"opt_config.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35738074099","text":"import pandas as pd\n\n# import cdata as cmod \n# conn = cmod.connect(\"User=Prueba@WIN-SERVIDOR-BD\\DWGRUPOTOTAL;Password=Prueba_1;\")\n\n\n# cur = conn.cursor()\n# cur.execute(\"SELECT * FROM DAccidentes\")\n# rs = cur.fetchall()\n# for row in rs:\n# print(row)\n\ndir_datos = \"C:/Users/Usuario/portafolio/{}\"\ndf = pd.read_excel(dir_datos.format('AUXILIARES 2019.xlsx'), sheet_name='AUX VENDEDORES')\nfor i in df:\n print(i)\n ","repo_name":"leandrokategora/Portafolio","sub_path":"cdata.py","file_name":"cdata.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26670686728","text":"from typing import (\n Any,\n Callable,\n Dict,\n Generic,\n List,\n Literal,\n Optional,\n TypeVar,\n Union,\n cast,\n)\n\nfrom typing_extensions import NotRequired, TypeAlias, TypedDict\n\nfrom prosemirror.model.content import ContentMatch\nfrom prosemirror.model.fragment import Fragment\nfrom prosemirror.model.mark import Mark\nfrom prosemirror.model.node import Node, TextNode\nfrom prosemirror.utils import JSON, Attrs, JSONDict\n\n\ndef default_attrs(attrs: \"Attributes\") -> Optional[Attrs]:\n defaults = {}\n for attr_name, attr in attrs.items():\n if not attr.has_default:\n return None\n defaults[attr_name] = attr.default\n return defaults\n\n\ndef compute_attrs(attrs: \"Attributes\", value: Optional[Attrs]) -> Attrs:\n built = {}\n for name in attrs:\n given = None\n if value:\n given = value.get(name)\n if given is None:\n attr = attrs[name]\n if attr.has_default:\n given = attr.default\n else:\n raise ValueError(\"No value supplied for attribute \" + name)\n built[name] = given\n return built\n\n\ndef init_attrs(attrs: Optional[\"AttributeSpecs\"]) -> \"Attributes\":\n result = {}\n if attrs:\n for name in attrs:\n result[name] = Attribute(attrs[name])\n return result\n\n\nclass NodeType:\n \"\"\"\n Node types are objects allocated once per `Schema` and used to\n [tag](#model.Node.type) `Node` instances. 
They contain information\n about the node type, such as its name and what kind of node it\n represents.\n \"\"\"\n\n name: str\n\n schema: \"Schema[Any, Any]\"\n\n spec: \"NodeSpec\"\n\n inline_content: bool\n\n mark_set: Optional[List[\"MarkType\"]]\n\n def __init__(self, name: str, schema: \"Schema[Any, Any]\", spec: \"NodeSpec\") -> None:\n self.name = name\n self.schema = schema\n self.spec = spec\n self.groups = spec[\"group\"].split(\" \") if \"group\" in spec else []\n self.attrs = init_attrs(spec.get(\"attrs\"))\n self.default_attrs = default_attrs(self.attrs)\n self._content_match: Optional[ContentMatch] = None\n self.mark_set = None\n self.inline_content = False\n self.is_block = not (spec.get(\"inline\") or name == \"text\")\n self.is_text = name == \"text\"\n\n @property\n def content_match(self) -> ContentMatch:\n assert self._content_match is not None\n return self._content_match\n\n @content_match.setter\n def content_match(self, value: ContentMatch) -> None:\n self._content_match = value\n\n @property\n def is_inline(self) -> bool:\n return not self.is_block\n\n @property\n def is_text_block(self) -> bool: # FIXME: name is wrong, should be is_textblock\n return self.is_block and self.inline_content\n\n @property\n def is_leaf(self) -> bool:\n return self.content_match == ContentMatch.empty\n\n @property\n def is_atom(self) -> bool:\n return self.is_leaf or bool(self.spec.get(\"atom\"))\n\n @property\n def whitespace(self) -> Literal[\"pre\", \"normal\"]:\n return self.spec.get(\"whitespace\") or (\n \"pre\" if self.spec.get(\"code\") else \"normal\"\n )\n\n def has_required_attrs(self) -> bool:\n for n in self.attrs:\n if self.attrs[n].is_required:\n return True\n return False\n\n def compatible_content(self, other: \"NodeType\") -> bool:\n return self == other or (self.content_match.compatible(other.content_match))\n\n def compute_attrs(self, attrs: Optional[Attrs]) -> Attrs:\n if attrs is None and self.default_attrs is not None:\n return self.default_attrs\n return compute_attrs(self.attrs, attrs)\n\n def create(\n self,\n attrs: Optional[Attrs] = None,\n content: Union[Fragment, Node, List[Node], None] = None,\n marks: Optional[List[Mark]] = None,\n ) -> Node:\n if self.is_text:\n raise ValueError(\"NodeType.create cannot construct text nodes\")\n return Node(\n self,\n self.compute_attrs(attrs),\n Fragment.from_(content),\n Mark.set_from(marks),\n )\n\n def create_checked(\n self,\n attrs: Optional[Attrs] = None,\n content: Union[Fragment, Node, List[Node], None] = None,\n marks: Optional[List[Mark]] = None,\n ) -> Node:\n content = Fragment.from_(content)\n if not self.valid_content(content):\n raise ValueError(\"Invalid content for node \" + self.name)\n return Node(self, self.compute_attrs(attrs), content, Mark.set_from(marks))\n\n def create_and_fill(\n self,\n attrs: Optional[Attrs] = None,\n content: Union[Fragment, Node, List[Node], None] = None,\n marks: Optional[List[Mark]] = None,\n ) -> Optional[Node]:\n attrs = self.compute_attrs(attrs)\n frag = Fragment.from_(content)\n if frag.size:\n before = self.content_match.fill_before(frag)\n if not before:\n return None\n frag = before.append(frag)\n matched = self.content_match.match_fragment(frag)\n if not matched:\n return None\n after = matched.fill_before(Fragment.empty, True)\n if not after:\n return None\n return Node(self, attrs, frag.append(after), Mark.set_from(marks))\n\n def valid_content(self, content: Fragment) -> bool:\n result = self.content_match.match_fragment(content)\n if not result or not 
result.valid_end:\n return False\n for i in range(content.child_count):\n if not self.allows_marks(content.child(i).marks):\n return False\n return True\n\n def allows_mark_type(self, mark_type: \"MarkType\") -> bool:\n return self.mark_set is None or mark_type in self.mark_set\n\n def allows_marks(self, marks: List[Mark]) -> bool:\n if self.mark_set is None:\n return True\n return all(self.allows_mark_type(mark.type) for mark in marks)\n\n def allowed_marks(self, marks: List[Mark]) -> List[Mark]:\n if self.mark_set is None:\n return marks\n copy: Optional[List[Mark]] = None\n for i, mark in enumerate(marks):\n if not self.allows_mark_type(mark.type):\n if not copy:\n copy = marks[0:i]\n elif copy:\n copy.append(mark)\n if copy is None:\n return marks\n elif len(copy):\n return copy\n else:\n return Mark.none\n\n @classmethod\n def compile(\n cls, nodes: Dict[\"Nodes\", \"NodeSpec\"], schema: \"Schema[Nodes, Marks]\"\n ) -> Dict[\"Nodes\", \"NodeType\"]:\n result: Dict[\"Nodes\", \"NodeType\"] = {}\n\n for name, spec in nodes.items():\n result[name] = NodeType(name, schema, spec)\n\n top_node = cast(Nodes, schema.spec.get(\"topNode\") or \"doc\")\n if not result.get(top_node):\n raise ValueError(f\"Schema is missing its top node type {top_node}\")\n if not result.get(cast(Nodes, \"text\")):\n raise ValueError(\"every schema needs a 'text' type\")\n if result[cast(Nodes, \"text\")].attrs:\n raise ValueError(\"the text node type should not have attributes\")\n return result\n\n def __str__(self) -> str:\n return f\"\"\n\n def __repr__(self) -> str:\n return self.__str__()\n\n\nAttributes: TypeAlias = Dict[str, \"Attribute\"]\n\n\nclass Attribute:\n def __init__(self, options: \"AttributeSpec\") -> None:\n self.has_default = \"default\" in options\n self.default = options[\"default\"] if self.has_default else None\n\n @property\n def is_required(self) -> bool:\n return not self.has_default\n\n\nclass MarkType:\n excluded: List[\"MarkType\"]\n instance: Optional[Mark]\n\n def __init__(\n self, name: str, rank: int, schema: \"Schema[Any, Any]\", spec: \"MarkSpec\"\n ) -> None:\n self.name = name\n self.schema = schema\n self.spec = spec\n self.attrs = init_attrs(spec.get(\"attrs\"))\n self.rank = rank\n self.excluded = None # type: ignore[assignment]\n defaults = default_attrs(self.attrs)\n self.instance = None\n if defaults:\n self.instance = Mark(self, defaults)\n\n def create(\n self,\n attrs: Optional[Attrs] = None,\n ) -> Mark:\n if not attrs and self.instance:\n return self.instance\n return Mark(self, compute_attrs(self.attrs, attrs))\n\n @classmethod\n def compile(\n cls, marks: Dict[\"Marks\", \"MarkSpec\"], schema: \"Schema[Nodes, Marks]\"\n ) -> Dict[\"Marks\", \"MarkType\"]:\n result = {}\n rank = 0\n for name, spec in marks.items():\n result[name] = MarkType(name, rank, schema, spec)\n rank += 1\n return result\n\n def remove_from_set(self, set_: List[\"Mark\"]) -> List[\"Mark\"]:\n return [item for item in set_ if item.type != self]\n\n def is_in_set(self, set: List[Mark]) -> Optional[Mark]:\n return next((item for item in set if item.type == self), None)\n\n def excludes(self, other: \"MarkType\") -> bool:\n return any(other.name == e.name for e in self.excluded)\n\n\nNodes = TypeVar(\"Nodes\", bound=str, covariant=True)\nMarks = TypeVar(\"Marks\", bound=str, covariant=True)\n\n\nclass SchemaSpec(TypedDict, Generic[Nodes, Marks]):\n \"\"\"\n An object describing a schema, as passed to the [`Schema`](#model.Schema)\n constructor.\n \"\"\"\n\n # The node types in this schema. 
Maps names to\n # [`NodeSpec`](#model.NodeSpec) objects that describe the node type\n # associated with that name. Their order is significant—it\n # determines which [parse rules](#model.NodeSpec.parseDOM) take\n # precedence by default, and which nodes come first in a given\n # [group](#model.NodeSpec.group).\n nodes: Dict[Nodes, \"NodeSpec\"]\n\n # The mark types that exist in this schema. The order in which they\n # are provided determines the order in which [mark\n # sets](#model.Mark.addToSet) are sorted and in which [parse\n # rules](#model.MarkSpec.parseDOM) are tried.\n marks: NotRequired[Dict[Marks, \"MarkSpec\"]]\n\n # The name of the default top-level node for the schema. Defaults\n # to `\"doc\"`.\n topNode: NotRequired[str]\n\n\nclass NodeSpec(TypedDict, total=False):\n \"\"\"\n A description of a node type, used when defining a schema.\n \"\"\"\n\n content: str\n marks: str\n group: str\n inline: bool\n atom: bool\n attrs: \"AttributeSpecs\"\n selectable: bool\n draggable: bool\n code: bool\n whitespace: Literal[\"pre\", \"normal\"]\n definingAsContext: bool\n definingForContent: bool\n defining: bool\n isolating: bool\n toDOM: Callable[[Node], Any] # FIXME: add types\n parseDOM: List[Dict[str, Any]] # FIXME: add types\n toDebugString: Callable[[Node], str]\n leafText: Callable[[Node], str]\n\n\nAttributeSpecs: TypeAlias = Dict[str, \"AttributeSpec\"]\n\n\nclass MarkSpec(TypedDict, total=False):\n attrs: AttributeSpecs\n inclusive: bool\n excludes: str\n group: str\n spanning: bool\n toDOM: Callable[[Mark, bool], Any] # FIXME: add types\n parseDOM: List[Dict[str, Any]] # FIXME: add types\n\n\nclass AttributeSpec(TypedDict, total=False):\n default: JSON\n\n\nclass Schema(Generic[Nodes, Marks]):\n spec: SchemaSpec[Nodes, Marks]\n\n nodes: Dict[Nodes, \"NodeType\"]\n\n marks: Dict[Marks, \"MarkType\"]\n\n def __init__(self, spec: SchemaSpec[Nodes, Marks]) -> None:\n self.spec = spec\n self.nodes = NodeType.compile(self.spec[\"nodes\"], self)\n self.marks = MarkType.compile(self.spec.get(\"marks\", {}), self)\n content_expr_cache = {}\n for prop in self.nodes:\n if prop in self.marks:\n raise ValueError(f\"{prop} can not be both a node and a mark\")\n type = self.nodes[prop]\n content_expr = type.spec.get(\"content\", \"\")\n mark_expr = type.spec.get(\"marks\")\n if content_expr not in content_expr_cache:\n content_expr_cache[content_expr] = ContentMatch.parse(\n content_expr, cast(Dict[str, \"NodeType\"], self.nodes)\n )\n\n type.content_match = content_expr_cache[content_expr]\n type.inline_content = type.content_match.inline_content\n if mark_expr == \"_\":\n type.mark_set = None\n elif mark_expr:\n type.mark_set = gather_marks(self, mark_expr.split(\" \"))\n elif mark_expr == \"\" or not type.inline_content:\n type.mark_set = []\n else:\n type.mark_set = None\n for mark in self.marks.values():\n excl = mark.spec.get(\"excludes\")\n mark.excluded = (\n [mark]\n if excl is None\n else ([] if excl == \"\" else (gather_marks(self, excl.split(\" \"))))\n )\n\n self.top_node_type = self.nodes[cast(Nodes, self.spec.get(\"topNode\") or \"doc\")]\n self.cached: Dict[str, Any] = {}\n self.cached[\"wrappings\"] = {}\n\n def node(\n self,\n type: Union[str, NodeType],\n attrs: Optional[Attrs] = None,\n content: Union[Fragment, Node, List[Node], None] = None,\n marks: Optional[List[Mark]] = None,\n ) -> Node:\n if isinstance(type, str):\n type = self.node_type(type)\n elif not isinstance(type, NodeType):\n raise ValueError(f\"Invalid node type: {type}\")\n elif type.schema != self:\n 
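            # editor's note (added annotation): node types are bound to the
            # schema that compiled them, so a NodeType created by a different
            # Schema instance is rejected here instead of being silently reused.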
raise ValueError(f\"Node type from different schema used ({type.name})\")\n return type.create_checked(attrs, content, marks)\n\n def text(self, text: str, marks: Optional[List[Mark]] = None) -> TextNode:\n type = self.nodes[cast(Nodes, \"text\")]\n return TextNode(\n type, cast(Attrs, type.default_attrs), text, Mark.set_from(marks)\n )\n\n def mark(\n self,\n type: Union[str, MarkType],\n attrs: Optional[Attrs] = None,\n ) -> Mark:\n if isinstance(type, str):\n type = self.marks[cast(Marks, type)]\n return type.create(attrs)\n\n def node_from_json(self, json_data: JSONDict) -> Union[Node, TextNode]:\n return Node.from_json(self, json_data)\n\n def mark_from_json(\n self,\n json_data: JSONDict,\n ) -> Mark:\n return Mark.from_json(self, json_data)\n\n def node_type(self, name: str) -> NodeType:\n found = self.nodes.get(cast(Nodes, name))\n if not found:\n raise ValueError(f\"Unknown node type: {name}\")\n return found\n\n\ndef gather_marks(schema: Schema[Any, Any], marks: List[str]) -> List[MarkType]:\n found = []\n for name in marks:\n mark = schema.marks.get(name)\n ok = mark\n if mark:\n found.append(mark)\n else:\n for mark in schema.marks.values():\n if name == \"_\" or (\n mark.spec.get(\"group\") and name in mark.spec[\"group\"].split(\" \")\n ):\n ok = mark\n found.append(mark)\n if not ok:\n raise SyntaxError(f\"unknow mark type: '{mark}'\")\n return found\n","repo_name":"fellowapp/prosemirror-py","sub_path":"prosemirror/model/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":15192,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"47"} +{"seq_id":"72866435984","text":"\r\n\r\n#10. Dada una lista de nombres de estudiantes y dos listas con sus notas en un curso, escriba un\r\n#programa que manipule dichas estructuras de datos para poder resolver los siguientes puntos:\r\n#A. Generar una estructura con todas las notas relacionando el nombre del estudiante con las\r\n#notas. Utilizar esta estructura para la resolución de los siguientes items.\r\n#B. Calcular el promedio de notas de cada estudiante.\r\n#C. Calcular el promedio general del curso.\r\n#D. Identificar al estudiante con la nota promedio más alta.\r\n#E. 
Identificar al estudiante con la nota más baja.\r\n\r\n\r\n\r\n\r\nnombres = ''' 'Agustin', 'Alan', 'Andrés', 'Ariadna', 'Bautista', 'CAROLINA', 'CESAR',\r\n'David','Diego', 'Dolores', 'DYLAN', 'ELIANA', 'Emanuel', 'Fabián', 'Facundo',\r\n'Francsica', 'FEDERICO', 'Fernanda', 'GONZALO', 'Gregorio', 'Ignacio', 'Jonathan',\r\n'Joaquina', 'Jorge','JOSE', 'Javier', 'Joaquín' , 'Julian', 'Julieta', 'Luciana',\r\n'LAUTARO', 'Leonel', 'Luisa', 'Luis', 'Marcos', 'María', 'MATEO', 'Matias',\r\n'Nicolás', 'Nancy', 'Noelia', 'Pablo', 'Priscila', 'Sabrina', 'Tomás', 'Ulises',\r\n'Yanina' '''\r\n\r\nnotas_1 = [81, 60, 72, 24, 15, 91, 12, 70, 29, 42, 16, 3, 35, 67, 10, 57, 11, 69,\r\n12, 77, 13, 86, 48, 65, 51, 41, 87, 43, 10, 87, 91, 15, 44,\r\n85, 73, 37, 42, 95, 18, 7, 74, 60, 9, 65, 93, 63, 74]\r\n\r\nnotas_2 = [30, 95, 28, 84, 84, 43, 66, 51, 4, 11, 58, 10, 13, 34, 96, 71, 86, 37,\r\n64, 13, 8, 87, 14, 14, 49, 27, 55, 69, 77, 59, 57, 40, 96, 24, 30, 73,\r\n95, 19, 47, 15, 31, 39, 15, 74, 33, 57, 10]\r\n\r\n\r\n\r\ndef armarDiccionario(nombres,notas1,notas2):\r\n \"\"\" Creo un diccionario que va tener como clave su nombre y el valor va a ser una tupla con sus dos notas asociadas\"\"\"\r\n nombres = nombres.replace('\\n','').replace(\"'\",'').replace(' ','').split(',')\r\n return dict(zip(nombres, zip(notas1, notas2)))\r\n\r\nDicAlumnosConNotas=armarDiccionario(nombres,notas_1,notas_2)\r\n\r\npromedioNotas = lambda nota1,nota2:((nota1+nota2)/2)\r\n\r\nalumnosNotas={'notaPromedioAlta':('',-1),'notaBaja':('',99999)}\r\ndef calcularNotaPromedioMasAlta(prom,alumno,alumnosNotas):\r\n \"\"\" Calcula la nota promedio mas alta \"\"\"\r\n if prom > alumnosNotas['notaPromedioAlta'][1]:\r\n alumnosNotas['notaPromedioAlta'] = (alumno, prom)\r\n \r\ndef calcularNotaMasBaja(elem,alumnosNotas):\r\n \"\"\" Calcula la nota mas baja \"\"\"\r\n for nota in elem[1:2]:\r\n if nota < alumnosNotas['notaBaja'][1]:\r\n alumnosNotas['notaBaja'] = (elem[0],nota)\r\n\r\npromedioTotal= lambda notas,lista:(notas/len(lista) )\r\n \r\n\r\nnotasTotalAlumnos=0\r\n\r\nfor nombre, notas in DicAlumnosConNotas.items():\r\n promNotasAlumno = promedioNotas(notas[0], notas[1])\r\n print(f'El alumno {nombre} tiene un promedio de {promNotasAlumno}')\r\n notasTotalAlumnos += promNotasAlumno\r\n calcularNotaPromedioMasAlta(promNotasAlumno, nombre, alumnosNotas)\r\n calcularNotaMasBaja((nombre, notas[0], notas[1]), alumnosNotas)\r\n \r\n\r\nprint(f'El promedio general del curso es {promedioTotal(notasTotalAlumnos,DicAlumnosConNotas):.3f} ')\r\nprint(f'El alumno con la nota mas baja es {alumnosNotas[\"notaBaja\"]}')\r\nprint(f'El alumno con nota promedio mas alta es {alumnosNotas[\"notaPromedioAlta\"]}')\r\n","repo_name":"BautistaBordes/Python","sub_path":"Entrega_2/ejer10.py","file_name":"ejer10.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21608609687","text":"### 7.5\n\n# **1.** In **\"SARS-CoV-2_S.gb\"** file, you will find the GenBank sequence\n# records of the 'S' gene for the first 50 accessions we used in previous\n# exercises. \n# Can you create a **fasta** file that contains the spike protein sequences\n# for these? Try to keep their `.id` and avoid translating stop codons!\n\nwith open(\"spike_proteins.fa\", \"w\") as fasta:\n for rec in SeqIO.parse(\"data/SARS-CoV-2_S.gb\", \"genbank\"):\n prot = rec.translate(to_stop=True)\n prot.id = rec.id\n SeqIO.write(prot, fasta, \"fasta\")\n\n\n# **2**. 
Did you notice the *Warning* above, when we try to translate the\n# first 50 accessions?\n# It seems the length of one or more of our 'S' CDSs is not a multiple of three.\n# Can you find which one?\n\nfor rec in SeqIO.parse(\"data/SARS-CoV-2_S.gb\", \"genbank\"):\n for feature in rec.features:\n if feature.type == 'gene' and 'S' in feature.qualifiers['gene']:\n s_length = feature.location.end - feature.location.start\n if s_length % 3 != 0:\n print(\"'S' gene from '{}' has a length of {}, which is not a multiple of 3\".format(\n rec.id, s_length))","repo_name":"sib-swiss/first-steps-with-python-training","sub_path":"notebooks/solutions/solution_75.py","file_name":"solution_75.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"47"} +{"seq_id":"41566029035","text":"class Programming_Language_Course:\n def __init__(self, courseCode , startOfCourse , endOfCourse , courseFee , courseStructor ,courseLevel ):\n self.__courseCode=courseCode\n self._startOfCourse=startOfCourse\n self._endOfCourse=endOfCourse\n self._courseFee=courseFee\n self._courseStructor=courseStructor\n self._courseLevel =courseLevel \n self._courseDays=[]\n self.dicShow={\"Course_Code is\":self.__courseCode}\n def _showInfo(self):\n return self.dicShow\n def add_Days(self,day):\n self._courseDays.append(day)\n @property\n def courseCode(self):\n return self.__courseCode\n @courseCode.setter\n def courseCode(self,courseCode):\n self.__courseCode= courseCode \n#//////////////////////////////\nclass Python(Programming_Language_Course):\n def __init__(self, courseCode , startOfCourse , endOfCourse , courseFee , courseStructor,courseLevel ):\n super().__init__(courseCode,startOfCourse,endOfCourse,courseFee,courseStructor,courseLevel )\n self.dicPythonShow={\"start of python course is\":self._startOfCourse,\"end of python course is\":self._endOfCourse, \"python course Fee is\":self._courseFee,\"python course structor is\":self._courseStructor,\"python course days is\":self._courseDays,\"course level is\": self._courseLevel}\n self.dicShow.update(self.dicPythonShow)\n def showInfoPythonCourse(self):\n return self.dicShow\n#//////////////////////////////\nclass Java(Programming_Language_Course):\n def __init__(self, courseCode , startOfCourse , endOfCourse , courseFee , courseStructor ,courseLevel):\n super().__init__(courseCode,startOfCourse,endOfCourse,courseFee,courseStructor,courseLevel)\n self.dicJavaShow={\"start of java course is\":self._startOfCourse,\"end of java course is\":self._endOfCourse, \"java course Fee is\":self._courseFee,\"java course structor is\":self._courseStructor,\"java course days is\":self._courseDays,\"course level is\": self._courseLevel}\n self.dicShow.update(self.dicJavaShow)\n def showInfoJavaCourse(self):\n return self.dicShow\n#//////////////////////////////\nclass PHP(Programming_Language_Course):\n def __init__(self, courseCode , startOfCourse , endOfCourse , courseFee , courseStructor,courseLevel ):\n super().__init__(courseCode,startOfCourse,endOfCourse,courseFee,courseStructor,courseLevel)\n self.dicPHPshow={\"start of PHP course is\":self._startOfCourse,\"end of PHP course is\":self.
_endOfCourse, \"PHP course Fee is\":self._courseFee,\"PHP course structor is\":self._courseStructor,\"PHP course days is\":self._courseDays,\"course level is\": self._courseLevel}\n self.dicShow.update(self.dicPHPshow)\n def showInfoPHPcourse(self):\n return self.dicShow\n#////////////////////////////\np1=Python(2345,\"1400/6/25\",\"1400/9/3\",2500,\"mahdavi\",\"Basic_level\")\np1.add_Days(\"sunday\")\np1.add_Days(\"tuesday\")\np1.add_Days(\"wendesday\")\np2=Python(3642,\"1400/9/5\",\"1400/12/20\",3000,\"mahdavi\",\"ََAdvanced_level\")\np2.add_Days(\"sunday\")\np2.add_Days(\"tuesday\")\np2.add_Days(\"wendesday\")\nj1=Java(5120,\"1401/1/5\",\"1401/2/3\",2000,\"karimi\",\"Basic_level\")\nj1.add_Days(\"saturday\")\nj1.add_Days(\"sunday\")\nj1.add_Days(\"monday\")\nj2=Java(1245,\"1401/2/5\",\"1401/3/3\",2200,\"karimi\",\"Advanced_level\")\nj2.add_Days(\"saturday\")\nj2.add_Days(\"sunday\")\nj2.add_Days(\"monday\")\nphp1=PHP(2580,\"1401/1/15\",\"1401/3/15\",5000,\"ghodosi\",\"Basic_level\")\nphp1.add_Days(\"sunday\")\nphp1.add_Days(\"tusesday\")\nphp1.add_Days(\"wendesday\")\nphp1.add_Days(\"thursday\")\nphp2=PHP(2131,\"1401/3/16\",\"1401/4/3\",5500,\"ghodosi\",\"Advanced_level\")\nphp2.add_Days(\"sunday\")\nphp2.add_Days(\"tusesday\")\nphp2.add_Days(\"wendesday\")\nphp2.add_Days(\"thursday\")\nlist_show=[p1.showInfoPythonCourse(),p2.showInfoPythonCourse(),j1.showInfoJavaCourse(),j2.showInfoJavaCourse(),php1.showInfoPHPcourse(),php2.showInfoPHPcourse()]\nfor course in list_show:\n print(course,\"\\n\")\n","repo_name":"rezaho96/AI_Course","sub_path":"Homework/Homework3_1.py","file_name":"Homework3_1.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72686716942","text":"# https://leetcode.com/problems/subtree-of-another-tree/\nfrom typing import Optional\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nfrom collections import deque\nclass Solution:\n def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:\n def equal(root1,root2):\n if not root1 and not root2:\n return True\n if (root1 and not root2) or (not root1 and root2):\n return False\n if root1.val!=root2.val:\n return False\n return equal(root1.left,root2.left) and equal(root1.right,root2.right)\n queue=deque([root])\n while(queue):\n node=queue.pop()\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n if equal(node,subRoot):\n return True\n return False\n\n\n","repo_name":"luohwu/DailyAlgorithmExercise","sub_path":"tree/Subtree of Another Tree.py","file_name":"Subtree of Another Tree.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"258370359","text":"import argparse\nimport http.client\nimport tarfile\n\nimport urllib.parse\nfrom io import BytesIO\nfrom os import remove, rename\nfrom os.path import exists, realpath, relpath\n\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\nclass StatPlotter(tk.Tk):\n INDEX = 'Date'\n headers: list[str]\n df: pd.DataFrame\n app: ttk.Frame\n options: ttk.LabelFrame\n buttons: ttk.Frame\n ok: ttk.Button\n cancel: ttk.Button\n checks: list[ttk.Checkbutton]\n\n def __init__(self, file: str) -> None:\n if not file.endswith('.csv'):\n raise Exception('Not a 
csv file')\n super().__init__()\n self.df = pd.read_csv(file)\n self.headers = list(self.df)\n self.app = ttk.Frame(self, padding=(5, 2))\n self.options = ttk.LabelFrame(self.app, text='Select columns',\n borderwidth=10)\n self.buttons = ttk.Frame(self.app,\n borderwidth=2, padding=(5, 2))\n self.ok = ttk.Button(self.buttons, text='OK',\n command=self.__plot)\n self.cancel = ttk.Button(self.buttons, text='Cancel',\n command=self.destroy)\n self.checks = [ttk.Checkbutton(self.options, text=i, onvalue=True, offvalue=False)\n for i in self.headers if i != self.INDEX]\n\n self.minsize(230, 150)\n self.title('spaceship')\n self.resizable(False, False)\n self.app.pack(fill='both')\n self.options.pack(side='top', fill='both')\n\n for i in self.checks:\n i.state(['selected'])\n i.pack(anchor='w')\n\n self.buttons.pack(side='bottom')\n self.ok.pack(side='left')\n self.cancel.pack(side='right')\n self.__center()\n\n def __plot(self) -> None:\n self.df[[self.INDEX, *(h for c, h in zip(self.checks,\n self.headers) if c.state())]].plot(self.INDEX)\n # self.destroy()\n plt.show()\n\n def __center(self, geometry: str = '') -> None:\n if geometry:\n ww, wh, = map(int, geometry.split('x'))\n else:\n ww = self.winfo_reqwidth()\n wh = self.winfo_reqheight()\n x = self.winfo_screenwidth() // 2 - ww // 2\n y = self.winfo_screenheight() // 2 - wh // 2\n if geometry:\n self.geometry('{}x{}+{}+{}'.format(ww, wh, x, y))\n else:\n self.geometry('+{}+{}'.format(x, y))\n\n\nclass Util:\n SERVER = '127.0.0.1:3000'\n TOKEN = 'TOKEN GOES HERE'\n\n @staticmethod\n def path(path: str) -> str:\n if exists(path):\n return path\n raise argparse.ArgumentTypeError(f'{path} is not a valid path')\n\n @staticmethod\n def __get_task() -> str:\n if exists('.task'):\n with open('.task', 'r') as f:\n return f.read()\n else:\n raise LookupError\n\n @staticmethod\n def authenticate(token: str) -> None:\n path = realpath(__file__)\n with open(path, 'r') as r, open(path+'.tmp', 'w') as w:\n w.write(r.read().replace('TOKEN GOES HERE', token))\n remove(path)\n rename(path+'.tmp', path)\n\n @staticmethod\n def initialize(name: str) -> None:\n with open('.task', 'w') as f:\n f.write(name)\n print(f'Task {name} initialised')\n\n @staticmethod\n def result() -> None:\n try:\n name = Util.__get_task()\n con = http.client.HTTPConnection(Util.SERVER)\n con.request('POST', '/task/result',\n urllib.parse.urlencode({'token': Util.TOKEN, 'name': name}))\n print(con.getresponse().read().decode('utf8'))\n con.close()\n except LookupError:\n print('You have no active tasks')\n\n @staticmethod\n def create(files: list[str]) -> None:\n files = [relpath(i) for i in files]\n\n if 'Makefile' not in files:\n print('You must provide a top-level Makefile')\n return\n\n try:\n name = Util.__get_task()\n data = BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tar:\n for f in files:\n tar.add(f)\n\n con = http.client.HTTPConnection(Util.SERVER)\n con.request('POST', '/task/create',\n urllib.parse.urlencode({'token': Util.TOKEN, 'name': name, 'tar': data.getvalue()}))\n print(con.getresponse().read().decode('utf8'))\n con.close()\n\n except LookupError:\n print('You have no active tasks')\n\n @staticmethod\n def statistics() -> None:\n try:\n name = Util.__get_task()\n con = http.client.HTTPConnection(Util.SERVER)\n con.request('POST', '/stats',\n urllib.parse.urlencode({'token': Util.TOKEN, 'name': name}))\n stats = con.getresponse().read().decode('utf8')\n con.close()\n with open(f'{name}.csv', 'w') as f:\n f.write(stats)\n except LookupError:\n 
print('You have no active tasks')\n\n @staticmethod\n def plot(file: str) -> None:\n StatPlotter(file).mainloop()\n\ndef main() -> None:\n\n parser = argparse.ArgumentParser(description='Online CUDA compiler proxy', epilog='''You must also provide a Makefile that compiles and executes your program like this:\n all:\n gcc test.c -o test\n ./test\n ''', formatter_class=argparse.RawTextHelpFormatter)\n subparser = parser.add_subparsers(dest='command')\n auth_p = subparser.add_parser('auth', help='Authenticate access token')\n init_p = subparser.add_parser('init', help='Initialise current task')\n post_p = subparser.add_parser(\n 'send', help='Send current task for compilation')\n subparser.add_parser('res', help='Get last posted task result')\n subparser.add_parser('stat', help='Download run statistics of active task')\n stat_p = subparser.add_parser('plot', help='Visualize run statistics stored in a csv file')\n\n auth_p.add_argument('token', type=str)\n init_p.add_argument('name', type=str)\n post_p.add_argument('files', nargs='+', type=Util.path)\n stat_p.add_argument('file', nargs=1, type=Util.path)\n args = parser.parse_args()\n\n if args.command == 'auth':\n Util.authenticate(args.token)\n\n elif args.command == 'init':\n Util.initialize(args.name)\n\n elif args.command == 'send':\n Util.create(args.files)\n\n elif args.command == 'res':\n Util.result()\n\n elif args.command == 'stat':\n Util.statistics()\n\n elif args.command == 'plot':\n Util.plot(args.file[0])\n\n else:\n parser.print_help()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"COOLIRON2311/spaceship","sub_path":"public/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17453955863","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom bs4 import Comment\nimport pandas as pd\n\n# url that we are scraping\nurl = \"https://www.sports-reference.com/cbb/players/jeffery-taylor-1.html\"\n\n# this is the html from the given url\nhtml = urlopen(url)\n\n# create a BeautifulSoup object by passing through html to the BeautifulSoup() constructor.\n# lxml is a html parser\nsoup = BeautifulSoup(html, 'lxml')\n\n# column header of per game statistics\n# The line below gets us the column_headers\n# column_headers_per_game = [th.getText() for th in soup.find('thead').find('tr').findAll('th')]\ncolumn_headers_per_game = ['Season', 'School', 'Conf', 'G', 'GS', 'MP',\n 'FG', 'FGA', 'FG%', '2P', '2PA', '2P%', '3P',\n '3PA','3P%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB',\n 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS',\n '\\xa0', 'SOS']\n# There is a weird element '\\xa0' but we don't worry about it\n# We do not want the Season column because the HTML structure makes it more\n# trouble to extract the data, plus knowing the season is useless anyway\ncolumn_headers_per_game.pop(0)\n\n# Retrieving the per game statistics\nstats_per_game = [[td.getText() for td in soup.find('tfoot').find('tr').findAll('td')]]\n\n# Constructing the data frame\ndf_per_game = pd.DataFrame(stats_per_game, columns=column_headers_per_game)\n\n# Now we want some of that advanced stats\n# For some reason, the advanced stats for players drafted after 2011 is different than before\n# This is the column header for advanced stats after 2011\ncolumn_headers_advanced = ['Season', 'School', 'Conf', 'G', 'GS', 'MP',\n 'PER', 'TS%', 'eFG%', '3PAr', 'FTr', 'PProd',\n 'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%', 'BLK%',\n 'TOV%', 
'USG%', '', 'OWS', 'DWS', 'WS', 'WS/40',\n '', 'OBPM', 'DBPM', 'BPM']\n# We do not want the Season column because the HTML structure makes it more\n# trouble to extract the data, plus knowing the season is useless anyway\ncolumn_headers_advanced.pop(0)\n\n# Weirdly enough, all the advanced statistics are included as comments in the HTML file\n# Ergo we need to use the below in order to parse through the comments\ncomments = soup.findAll(text=lambda text:isinstance(text, Comment))\nfor c in comments:\n data = BeautifulSoup(c,\"lxml\")\n for items in data.select(\"table#players_advanced\"):\n # Retrieving the advanced statistics\n stats_advanced = [[item.get_text(strip=True) for item in items.find(\"tfoot\").find(\"tr\").select(\"td\")]]\n # data must be tended further if draft year is before 2011\n # specfically, PER, OBPM, DBPM, BPM and an empty column are missing\n # we insert blank into respective positions as place holders\n # if draft_year < 2011:\n #stats_advanced[0].insert(5, '')\n #for i in range(4):\n #stats_advanced[0].append('')\n\n df_advanced = pd.DataFrame(stats_advanced, columns=column_headers_advanced)\n\n # Some columns are redundant because they are already in per game stats\n df_advanced = df_advanced.drop(columns=['School', 'Conf', 'G', 'GS', 'MP'])\n\ndf_stats_player = pd.DataFrame.join(df_per_game,df_advanced)\ndf_stats_player = df_stats_player.drop(columns=['School','Conf','G','GS','PER','PProd','ORB%','DRB%','STL%','OBPM','DBPM','BPM'])\ndf_stats_player = df_stats_player.drop(df_stats_player.columns[38])\nprint(df_stats_player)\nprint(df_stats_player.columns)\n\n#df_stats_player.to_csv(\"jeffery taylor.csv\")","repo_name":"cheryonthetop/NCAA-Draftees-Career-PER-Prediction","sub_path":"Draft_College_Single_Player_SR_Stats.py","file_name":"Draft_College_Single_Player_SR_Stats.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"38799928939","text":"'''\nNot currently used or rejected functions.\n\n@author: Artem Sevastopolsky\n'''\n\n\n# ----------------------------------------------------\n# Functions that are used for contrast normalization in subimages follow\n# [] Huiqi Li et al. 
- Automated Feature Extraction in Color Retinal Images by a Model Based Approach\"\n\n\ndef hist2D(img):\n '''Returns dict of occurences of each pair (a, b) in a given 2-channel image img.'''\n if len(img.shape) != 3 or img.shape[2] != 2:\n raise Exception('hist2D() receives only 2-channel images')\n hist = defaultdict(int)\n for i in xrange(img.shape[0]):\n for j in xrange(img.shape[1]):\n hist[(img[i, j, 0], img[i, j, 1])] += 1\n return hist\n\n\n@jit\ndef weighted_mean(img, hist):\n '''Receives 2-channel img and returns its weighted mean color for every channel.'''\n mean = np.empty(2)\n for i in xrange(img.shape[0]):\n for j in xrange(img.shape[1]):\n u, v = img[i, j]\n mean[0] += u * hist[(u, v)]\n mean[1] += v * hist[(u, v)]\n mean /= img.shape[0] * img.shape[1]\n return mean\n\n\nn_subimgs = 8 # 8 by each side, 64 overall\n\nav_clrs = np.empty((img_luv.shape[0], img_luv.shape[1], 2))\ni_sub_size = img_luv.shape[0] // n_subimgs\nj_sub_size = img_luv.shape[1] // n_subimgs\nfor i in xrange(0, img_luv.shape[0], i_sub_size):\n i_end = (i + i_sub_size) if i + i_sub_size < img_luv.shape[0] else img_luv.shape[0]\n for j in xrange(0, img_luv.shape[1], j_sub_size):\n j_end = (j + j_sub_size) if j + j_sub_size < img_luv.shape[1] else img_luv.shape[1]\n \n sub_img = img_luv[i:i_end, j:j_end, 1:]\n hist = hist2D(sub_img)\n mean = weighted_mean(sub_img, hist)\n #print i, j, mean\n av_clrs[i:i_end, j:j_end, :] = mean\n \n\ndiff_img = np.sqrt((img_luv[:, :, 1] - av_clrs[:, :, 0]) ** 2 + \\\n (img_luv[:, :, 2] - av_clrs[:, :, 1]) ** 2)\nshow_image(diff_img)\n\n# -----------------------------------------------------------","repo_name":"seva100/retinopathy","sub_path":"scripts/not_currently_used.py","file_name":"not_currently_used.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"911938057","text":"import pdb\nimport sys\nimport argparse\nimport re\nimport os\nimport io\n\nparser=argparse.ArgumentParser()\nDUMP_MEMORY_FILENAME = \"memdump0.mem\"\n\nparser.add_argument('-i', help='Input file', required=True)\nparser.add_argument('-o', help='Outputfile', required=True)\n\nargs=parser.parse_args()\ninput_file = args.i\noutput_file = args.o\n\nif not os.path.isfile(input_file):\n print(\"Input file doesn't exist\")\n sys.exit(os.EX_OSFILE)\n\nDETECT_REGEX = \"reg \\[(.*)\\] (\\S*) \\[(.*)\\];\\n initial begin\\n(( \\S*\\[\\S*\\] = \\S*;\\n)*) end\\n\"\n\nwith open(input_file, \"r\") as f:\n input_content = f.read()\n\nmemory_values = re.search(DETECT_REGEX, input_content)\nmemory_splits = [a.split(\"=\")[1].strip().split(\"'h\")[1][:-1] for a in memory_values.group(4).split(\"\\n\")[:-1]]\n\nnew_string = f\"reg [{memory_values.group(1)}] {memory_values.group(2)} [{memory_values.group(3)}];\\n\"\nnew_string += f'$readmemh(\"{DUMP_MEMORY_FILENAME}\", {memory_values.group(2)});\\n'\noutput_content = input_content.replace(input_content[memory_values.start(0):memory_values.end(0)], new_string)\n\nwith open(DUMP_MEMORY_FILENAME, \"w\") as f:\n f.write('\\n'.join(memory_splits) + '\\n')\n\nwith open(output_file, \"w\") as f:\n f.write(output_content)\n\n","repo_name":"dmalisani/novospace","sub_path":"ej2/adapt.py","file_name":"adapt.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26551358262","text":"# -*- coding: utf-8 -*-\nimport requests\nimport xmltodict \nimport json\nimport sys\nimport 
yaml\n\nwith open('../pythonscripts/external-data/config.yml', 'r') as file:\n data = yaml.safe_load(file)\n\nservice_url = data['istsos']['url']\ncur_db = data['istsos']['db']\n\ndef retrieve_datetime():\n url = service_url + cur_db + '?' \\\n 'request=GetObservation&' \\\n 'offering=temporary&' \\\n 'observedProperty=meteo&' \\\n 'responseFormat=application/json&' \\\n 'service=SOS&' \\\n 'version=1.0.0'\n request = requests.get(url)\n ans = request.json()\n\n try:\n ans[\"ExceptionReport\"]\n return False, ans\n except KeyError:\n return True, ans\n\ndef retrieve_measures(station, start, end, sensor):\n start = start + 'T00:00:00'\n end = end[:-2] + str(int(end[-2:]) + 1) + 'T00:00:00'\n url = service_url + cur_db + '?' \\\n 'procedure=' + station + '&' \\\n 'eventTime=' + start + '/' + end + '&' \\\n 'request=GetObservation&' \\\n 'offering=temporary&' \\\n 'observedProperty=' + sensor + '&' \\\n 'responseFormat=application/json&' \\\n 'service=SOS&' \\\n 'version=1.0.0'\n # request = requests.get(url)\n # ans = request.json()\n request = requests.get(url, headers={'content-type':'application/json'})\n ans = json.loads(request.text)\n try:\n ans[\"ExceptionReport\"]\n return False, ans\n except KeyError:\n return True, ans\n\nif (sys.argv[1] == 'stations'):\n check_datetime, answer = retrieve_datetime()\n if (check_datetime):\n all_datetimes = []\n datetimes = answer['ObservationCollection']['member']\n for datetime in datetimes:\n name = datetime['name']\n beginPos = datetime['samplingTime']['beginPosition']\n endPos = datetime['samplingTime']['endPosition']\n components = datetime['observedProperty']['component'][1:]\n i = 0\n new_components = []\n for comp in components:\n new_components.append(components[i].split('meteo:')[1])\n i += 1\n \n all_datetimes.append([name, beginPos, endPos, new_components])\n # print([name, beginPos, endPos, new_components])\n print(all_datetimes)\nelse:\n check_measures, full_measures = retrieve_measures(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n if (check_measures):\n geom = full_measures['ObservationCollection']['member'][0]['featureOfInterest']['geom']\n geom = geom.replace(\"'\", '#')\n full_measures['ObservationCollection']['member'][0]['featureOfInterest']['geom'] = geom\n print(full_measures)","repo_name":"georgepitsolis/blockchain-istsos-dapp","sub_path":"pythonscripts/visualizeData.py","file_name":"visualizeData.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"40990963596","text":"from database import add_contact, get_contacts, update_chat_history\r\nimport sqlite3\r\nfrom imports import *\r\n\r\nclass ContactFunctions:\r\n def add_contact(self):\r\n # Show a dialog box to enter the name of the new contact\r\n new_contact_name, ok = QInputDialog.getText(self, \"Add Contact\", \"Enter the name of the new contact:\")\r\n\r\n if ok and new_contact_name != \"\":\r\n # Check if the user exists in the database\r\n conn = sqlite3.connect('database.db')\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT * FROM users WHERE username=?\", (new_contact_name,))\r\n user = cursor.fetchone()\r\n conn.close()\r\n\r\n if user is None:\r\n # The user does not exist in the database\r\n QMessageBox.warning(self, \"Error\", \"User does not exist.\")\r\n elif new_contact_name in self.chat_history:\r\n # The user already exists in the conversation\r\n QMessageBox.warning(self, \"Error\", \"User already exists in your conversation.\")\r\n 
else:\r\n # Add the new contact to the contact list widget\r\n item = QListWidgetItem(new_contact_name)\r\n item.setSizeHint(item.sizeHint())\r\n self.contact_list_widget.addItem(item)\r\n\r\n # Create chat history for the new contact\r\n self.chat_history[new_contact_name] = []\r\n\r\n # Add the new contact to the database\r\n add_contact(self.username, new_contact_name)\r\n\r\n\r\n def delete_contact(self):\r\n # Get the selected contact's name\r\n selected_contact_name = self.contact_list_widget.currentItem().text()\r\n\r\n # Show a confirmation dialog box before deleting the contact\r\n reply = QMessageBox.question(\r\n self, \"Delete Contact\", f\"Are you sure you want to delete {selected_contact_name}?\",\r\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No\r\n )\r\n\r\n if reply == QMessageBox.Yes:\r\n # Remove the contact from the contact list widget\r\n selected_item = self.contact_list_widget.currentItem()\r\n self.contact_list_widget.takeItem(self.contact_list_widget.row(selected_item))\r\n\r\n # Remove the chat history of the selected contact\r\n del self.chat_history[selected_contact_name]\r\n\r\n # Clear the chat history widget\r\n self.chat_history_widget.clear()\r\n\r\n # Update the chat header with empty values\r\n chat_contact_name_label = self.chat_header_widget.findChild(QLabel)\r\n chat_contact_name_label.setText(\"\")\r\n\r\n # Switch to the conversation list if there are no contacts left\r\n if self.contact_list_widget.count() == 0:\r\n self.stacked_widget.setCurrentWidget(self.conversation_list_widget)\r\n else:\r\n # Select the first contact in the list\r\n self.contact_list_widget.setCurrentRow(0)\r\n self.show_conversation()\r\n\r\n # Delete the contact from the database\r\n conn = sqlite3.connect('database.db')\r\n cursor = conn.cursor()\r\n cursor.execute(\"DELETE FROM dashboard WHERE username=? 
AND contact=?\", (self.username, selected_contact_name))\r\n conn.commit()\r\n conn.close()\r\n else:\r\n return\r\n","repo_name":"softwarica-github/coursework2-ShirilMahato-1","sub_path":"contact_functions.py","file_name":"contact_functions.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21205803856","text":"from sklearn import svm\nfrom random import shuffle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\ndef make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n\ndef plot_contours(ax, clf, xx, yy, **params):\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, **params)\n return out\n\nf = open(\"data/svmdata_e_test.txt\", \"r\")\na_test_mass = f.readlines()\nf.close()\na_test_mass = a_test_mass[1:]\nshuffle(a_test_mass)\n\nv = open(\"data/svmdata_e.txt\", \"r\")\na_mass = v.readlines()\nv.close()\na_mass = a_mass[1:]\nshuffle(a_mass)\n\ntranslator = {\n \"red\" : 0,\n \"green\" : 1\n}\n\ntest_val = len(a_mass)\nglobalX = []\nglobalY = []\n\nfor i in range(0, test_val):\n line = a_mass[i]\n line = line.rstrip(\"\\n\")\n arr = line.split(\"\\t\")\n currX = arr[1:3]\n currY = translator[arr[3]]\n globalX.append(currX)\n globalY.append(currY)\n\ntestX = []\ntestY = []\nfor i in range(0, len(a_test_mass)):\n line = a_test_mass[i]\n line = line.rstrip(\"\\n\")\n arr = line.split(\"\\t\")\n currX = arr[1:3]\n currY = translator[arr[3]]\n testX.append(currX)\n testY.append(currY)\n\nglobalX = np.array(globalX)\nglobalY = np.array(globalY)\nglobalX = globalX.astype(np.float)\nglobalY = globalY.astype(np.int)\n\ntestX = np.array(testX)\ntestY = np.array(testY)\ntestX = testX.astype(np.float)\ntestY = testY.astype(np.int)\n\nX = globalX\ny = globalY\n\nC = 0.2 # SVM regularization parameter\ngamma = 0.1\nmodels = (\n svm.SVC(kernel='rbf', gamma=gamma, C=C),\n svm.SVC(kernel='poly', degree=1, gamma=gamma, C=C),\n svm.SVC(kernel='poly', degree=2, gamma=gamma, C=C),\n svm.SVC(kernel='poly', degree=3, gamma=gamma, C=C),\n svm.SVC(kernel='poly', degree=4, gamma=gamma, C=C),\n svm.SVC(kernel='poly', degree=5, gamma=gamma, C=C),\n svm.SVC(kernel=\"sigmoid\", gamma=gamma))\nmodels = (clf.fit(X, y) for clf in models)\n# predictions = (clf.predict(testX) for clf in models)\n\n# for i in predictions:\n# print(accuracy_score(testY, i))\n# c_matrix = confusion_matrix(testY, i)\n# print(c_matrix)\n# print(\"***\")\n\ntitles = (\n 'RBF (Gauss)',\n 'Poly 1',\n 'Poly 2',\n 'Poly 3',\n 'Poly 4',\n 'Poly 5',\n 'sigmoid')\n\n# Set-up 4x2 grid for plotting.\nfig, sub = plt.subplots(4, 2)\nplt.subplots_adjust(wspace=0.4, hspace=0.4)\n\nX0, X1 = X[:, 0], X[:, 1]\nxx, yy = make_meshgrid(X0, X1)\n\nfor clf, title, ax in zip(models, titles, sub.flatten()):\n plot_contours(ax, clf, xx, yy,\n cmap=plt.cm.coolwarm, alpha=0.8)\n ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel('Sepal length')\n ax.set_ylabel('Sepal width')\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(title)\n\nplt.title('Current Gamma = ' + str(gamma) 
)\nplt.show()","repo_name":"ffoeta/university-ml-tasks","sub_path":"m-learn/l1/4_others.py","file_name":"4_others.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73345456781","text":"from nose.tools import ok_, eq_, raises\nfrom mongoom import *\nfrom mongoom.fields import ValidationError\nfrom mongoom.connection import get_connection, get_database\nfrom bson.objectid import ObjectId\nfrom bson import DBRef\nfrom datetime import datetime\n\nC = connect(\"test_db\", \"localhost\", 27017)\n\n\nclass User(Document):\n _index = {\"key_or_list\": [(\"name\", 1), (\"last_name\", 1)], \"unique\": True}\n name = Field(basestring, required=True)\n last_name = Field(basestring, required=True)\n created = Field(datetime, default=datetime.utcnow)\n\n\nclass Version(Document):\n name = Field(basestring)\n user = Field(User)\n path = Field(basestring)\n images = ListField(basestring)\n modified = Field(datetime, default=datetime.utcnow)\n\n\nclass Component(Document):\n _index = {\"key_or_list\": [(\"name\", 1), (\"created\", 1)], \"unique\": True}\n name = Field(basestring)\n user = Field(User)\n created = Field(datetime, default=datetime.utcnow)\n versions = ListField(Version)\n\n\nclass Container(Document):\n _index = {\"key_or_list\": [(\"name\", 1), (\"created\", 1)], \"unique\": True}\n name = Field(basestring, required=True)\n user = Field(User)\n created = Field(datetime, default=datetime.utcnow)\n components = ListField(Component)\n images = ListField(basestring)\n\n\nclass CheckListItem(EmbeddedDocument):\n text = Field(basestring)\n checked = Field(bool, default=False)\n\n\nclass CheckList(Document):\n title = Field(basestring)\n user = Field(User)\n items = ListField(CheckListItem)\n\n\ndef test_connect():\n '''Connection'''\n C.drop_database(\"test_db\")\n\n eq_(get_database(), C.test_db)\n\n C.test_db.test_col.insert({\"name\": \"test_entry\"})\n doc = C.test_db.test_col.find_one({\"name\": \"test_entry\"})\n ok_(all(field in doc for field in [\"_id\", \"name\"]))\n\n\ndef test_save():\n '''Save Document'''\n C.drop_database(\"test_db\")\n\n frank = User(\n name=\"Frank\",\n last_name=\"Footer\")\n\n eq_(frank.data[\"name\"], \"Frank\")\n eq_(frank.data[\"last_name\"], \"Footer\")\n ok_(\"created\" in frank.data)\n ok_(\"_id\" not in frank.data)\n\n frank.save()\n ok_(isinstance(frank._id, ObjectId))\n\n frank.last_name = \"Footers\"\n frank.save()\n\n\ndef test_find_one():\n '''Find One'''\n C.drop_database(\"test_db\")\n\n C.test_db.User.insert({\"name\": \"Frank\", \"last_name\": \"Footer\"})\n\n frank = User.find_one(name=\"Frank\")\n\n ok_(isinstance(frank._id, ObjectId))\n\n\ndef test_find():\n '''Find'''\n C.drop_database(\"test_db\")\n\n frank = User(\n name=\"Frank\",\n last_name=\"Footer\"\n ).save()\n\n bob = User(\n name=\"Bob\",\n last_name=\"Oob\"\n ).save()\n\n sam = User(\n name=\"Sam\",\n last_name=\"Samuelson\"\n ).save()\n\n users = User.find()\n ok_(all(user in [frank, bob, sam] for user in users))\n\n\n@raises(ValidationError)\ndef test_missing_required():\n '''Missing Required Field'''\n C.drop_database(\"test_db\")\n\n #Try to save while missing a required field (last_name)\n User(name=\"Frank\").save()\n\n\ndef test_RefField():\n '''RefField'''\n C.drop_database(\"test_db\")\n\n frank = User(\n name=\"Frank\",\n last_name=\"Footer\"\n ).save()\n\n asset_a = Container(\n name=\"Asset A\",\n user=frank)\n\n ok_(asset_a.user is frank)\n\n\ndef test_ListField():\n 
'''ListField'''\n C.drop_database(\"test_db\")\n\n frank = User(\n name=\"Frank\",\n last_name=\"Footer\"\n ).save()\n\n project_a = Container(\n name=\"Project A\",\n user=frank)\n\n project_a.images.append(\"path/to/image\")\n project_a.images.extend([\"path/to/image2\", \"path/to/image3\"])\n eq_(project_a.images.value,\n [\"path/to/image\", \"path/to/image2\", \"path/to/image3\"])\n eq_(project_a.images[0], \"path/to/image\")\n eq_(project_a.images[-1], \"path/to/image3\")\n eq_(project_a.images[1:], [\"path/to/image2\", \"path/to/image3\"])\n\n\ndef test_deref():\n '''Test dereferencing of ListField descriptor'''\n C.drop_database(\"test_db\")\n\n frank = User(\n name=\"Frank\",\n last_name=\"Footer\"\n ).save()\n\n asset_a = Container(\n name=\"Asset A\",\n user=frank).save()\n\n model_a = Component(\n name=\"Awesome Model\",\n user=frank).save()\n\n master = Version(\n name=\"master\",\n user=frank,\n path=\"path/to/file.ma\").save()\n\n v001 = Version(\n name=\"v001\",\n user=frank,\n path=\"path/to/file.ma\").save()\n\n asset_a.components += model_a\n model_a.versions += master, v001\n\n asset_a.save()\n model_a.save()\n\n ok_(all(isinstance(v, Version) for v in model_a.versions))\n ok_(all(isinstance(c, Component) for c in asset_a.components))\n\n\ndef test_ref():\n '''Test Reference ability of Field and ListField'''\n C.drop_database(\"test_db\")\n\n user_a = User(name=\"User\", last_name=\"A\").save()\n comp_a = Container(name=\"Component A\", user=user_a).save()\n comp_b = Component(name=\"Component B\", user=user_a).save()\n comp_a.components += comp_b\n comp_a.save()\n\n ok_(isinstance(comp_a._data['user'], DBRef))\n ok_(all(isinstance(c, DBRef) for c in comp_a._data['components']))\n\n\ndef test_embed():\n '''Test Embedded Document'''\n\n C.drop_database(\"test_db\")\n\n user_a = User(name=\"User\", last_name=\"A\").save()\n clist = CheckList(title=\"New Checklist\", user=user_a).save()\n clist_item_a = CheckListItem(text=\"Item A\")\n\n clist.items += clist_item_a\n clist.save()\n\n # Change text through clist_item_a's text descriptor\n clist_item_a.text = \"Item A Changed\"\n eq_(clist._data[\"items\"][0][\"text\"], \"Item A Changed\")\n\n # Change text through __getitem__ access\n clist.items[0].text = \"Item A Changed Twice\"\n eq_(clist._data[\"items\"][0][\"text\"], \"Item A Changed Twice\")\n\n\ndef test_index():\n db = C[\"test_db\"]\n index_kwargs = User.index()\n index_name = \"_\".join(\n [str(item) for key in index_kwargs[\"key_or_list\"] for item in key])\n ok_(index_name in db.User.index_information())\n","repo_name":"danbradham/mongoom","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18695581932","text":"from sys import stdin,stdout\nfrom collections import *\nfrom math import *\nst=lambda:list(stdin.readline().strip())\nli=lambda:list(map(int,stdin.readline().split()))\nmp=lambda:map(int,stdin.readline().split())\ninp=lambda:int(stdin.readline())\npr=lambda n: stdout.write(str(n)+\"\\n\")\n\nmod=1000000007\nINF=float('inf')\n\ndef solve():\n\n n=inp()\n x=100\n ans=0\n while x1:\n self.SetHuffmanPath(tree[1],dict,path+'0')\n else:\n dict[tree[1][0].tag[0]]=tree[1][0].value\n tree[2][0].value=path+'1'\n if len(tree[2])>1:\n self.SetHuffmanPath(tree[2],dict,path+'1')\n else:\n dict[tree[2][0].tag[0]]=tree[2][0].value\n\n def HuffmanFrequence(self, node):\n try:\n return node[0].value\n except:\n return 
node.value\n\n def __init__(self, root, selftree=None):\n dialog=Pmw.PromptDialog(root, title='Huffman Encoding',\n label_text='Text to compress:',\n entryfield_labelpos='w',\n entry_font=('Courier', 10),\n buttons=('OK', 'Cancel'),\n hull_width=40)\n if dialog.activate()!='OK': return\n text=dialog.get()\n\n dialog = Pmw.TextDialog(root, title = 'Huffman Encoding', text_font=('Courier', 12))\n dialog.maxsize(400,400)\n dialog.insert('end', 'Text to compress:\\n'+text)\n dialog.insert('end', '\\n\\nUncompressed size: %i\\n' % (8*len(text)))\n\n tree=[Node('Huffman')]\n dict={}\n for c in text:\n if not dict.has_key(c):\n dict[c]=1\n else:\n dict[c]=dict[c]+1\n for key in dict.keys():\n node=[Node(value=dict[key], tag=key)]\n node[0].data='frequence=%i' % dict[key]\n tree.append(node)\n\n frames=[]\n newframe(frames,tree)\n while len(tree)>3:\n MergeSort(tree,1)\n newframe(frames,tree)\n combined_frequence=self.HuffmanFrequence(tree[1])+self.HuffmanFrequence(tree[2])\n tree[1][0].color='green'\n tree[2][0].color='green'\n tree[1]=[Node(combined_frequence), tree[1], tree[2]]\n tree[1][0].data='frequence=%i' % combined_frequence\n del tree[2]\n newframe(frames,tree)\n \n tree[1][0].color='green'\n tree[2][0].color='green'\n newframe(frames,tree)\n\n dict={}\n self.SetHuffmanPath(tree,dict)\n newframe(frames,tree)\n ShowFrames(root,frames,showTree, title='Huffman Encoding Tree',save=selftree)\n dialog.insert('end', '\\nCompression Rules:\\n')\n for key in dict.keys():\n dialog.insert('end', \"\\t'\"+key+\"'-> \"+dict[key]+\"\\n\")\n\n dialog.insert('end', '\\nCompressed Text:\\n')\n size=0\n for c in text:\n dialog.insert('end', dict[c]+\" \") \n size=size+len(dict[c])\n\n dialog.insert('end', '\\n\\nCompressed Size: %i\\n' % (size))\n dialog.configure(text_state = 'disabled')\n return\n\n","repo_name":"mdipierro/algorithms-animator","sub_path":"src/csc321example_huffman.py","file_name":"csc321example_huffman.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"74303985101","text":"# -*- coding: utf-8 -*-\nimport re\nimport requests\nimport w3lib\nimport csv\n\nfrom parsel import Selector\nfrom pprint import pprint\n\ntry:\n from urllib.parse import urljoin\nexcept ImportError:\n from six.moves.urllib.parse import urljoin\n\n\nfile = open(\"douyin_data.csv\", \"w\")\n\ncsv_file = csv.writer(file)\ncsv_file.writerow([\n 'Nickname', 'Douyin_id', 'Avatar', 'Verify_info', 'Intro',\n 'Location', 'Constellation', 'Following', 'Follower',\n 'Like_count', 'Entry_count', 'Entry_likes'])\n\n\ndef fetch_data(url, proxy=None, rain_num=2):\n print(\"Loading:\", url)\n heads = {\n 'Accept': 'text/*, application/xml',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) \\\n AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/67.0.3396.62 Mobile Safari/537.36',\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Host\": \"www.douyin.com\",\n \"Upgrade-Insecure-Requests\": \"1\"\n }\n try:\n html = requests.get(url, headers=heads).text\n except Exception as e:\n print(\"Loading Faild:\", e.reason)\n html = None\n if rain_num > 0:\n if hasattr(e, 'code') and 500 <= e.code < 600:\n return fetch_data(url, rain_num - 1)\n return html\n\n\ndef fetch_info(uid):\n url = \"https://www.douyin.com/share/user/%s\" % uid\n body = fetch_data(url)\n xbody = Selector(text=body)\n # item = dict()\n\n try:\n error_msg = xbody.xpath(\n 
\"//div[@class='error-text']/p/text()\").extract_first()\n except Exception as e:\n error_msg = ''\n if error_msg == '页面不见啦~':\n print('----------用户不存在!----------')\n return\n\n try:\n nickname = xbody.xpath(\n \"//p[@class='nickname']/text()\").extract_first()\n except:\n nickname = ''\n\n try:\n entry_count = xbody.xpath(\n \"//div[@class='user-tab active tab get-list']/span\").extract_first()\n entry_count = re.findall(r'>([\\s\\S]+?)<', entry_count)\n entry_count = jiexi(entry_count).strip()\n except:\n entry_count = ''\n\n try:\n entry_likes = xbody.xpath(\n \"//div[@class='like-tab tab get-list']/span\").extract_first()\n entry_likes = re.findall(r'>([\\s\\S]+?)<', entry_likes)\n entry_likes = jiexi(entry_likes).strip()\n except:\n entry_likes = ''\n\n try:\n douyin_id = xbody.xpath(\"//p[@class='shortid']\").extract_first()\n douyin_id = re.findall(r'>([\\s\\S]+?)<', douyin_id)\n douyin_id = jiexi(douyin_id).replace(u\"抖音ID:\", '').strip()\n except:\n douyin_id = ''\n\n try:\n verify_info = xbody.xpath(\n \"//span[@class='info']/text()\").extract_first().strip()\n except Exception as e:\n verify_info = ''\n\n try:\n following = xbody.xpath(\n \"//span[contains(@class,'focus block')]/span[@class='num']\")\\\n .extract_first()\n following = re.findall(r'>([\\s\\S]+?)<', following)\n following = jiexi(following)\n except:\n following = ''\n\n try:\n follower = xbody.xpath(\n \"//span[contains(@class,'follower block')]/span[@class='num']\")\\\n .extract_first()\n follower = re.findall(r'>([\\s\\S]+?)<', follower)\n follower = jiexi(follower)\n except:\n follower = ''\n\n try:\n like_count = xbody.xpath(\n \"//span[contains(@class,'liked-num block')]/span[@class='num']\")\\\n .extract_first()\n like_count = re.findall(r'>([\\s\\S]+?)<', like_count)\n like_count = jiexi(like_count)\n except:\n like_count = ''\n\n try:\n intro = xbody.xpath(\"//p[@class='signature']/text()\").extract_first()\n except:\n intro = ''\n\n try:\n avatar = xbody.xpath(\"//img[@class='avatar']/@src\").extract_first()\n except:\n avatar = ''\n\n try:\n location = xbody.xpath(\n \"//span[@class='location']/text()\").extract_first()\n except Exception as e:\n location = ''\n\n try:\n constellation = xbody.xpath(\n \"//span[@class='constellation']/text()\").extract_first()\n except Exception as e:\n constellation = ''\n\n # item['douyin_id'] = douyin_id\n # item['nickname'] = nickname\n # item[\"follower\"] = follower\n # item[\"like_count\"] = like_count\n # item[\"following\"] = following\n # item['entry_count'] = entry_count\n # item['entry_likes'] = entry_likes\n # item['verify_info'] = verify_info\n # item['intro'] = intro\n # item['avatar'] = avatar\n # item['location'] = location\n # item['constellation'] = constellation\n # pprint(item)\n\n if douyin_id:\n csv_file.writerow([\n nickname, douyin_id, avatar, verify_info, intro, location,\n constellation, following, follower, like_count, entry_count,\n entry_likes])\n\n\ndef jiexi(lists):\n pat = {\n u\"\\ue60d\": 0,\n u\"\\ue603\": 0,\n u\"\\ue616\": 0,\n u\"\\ue60e\": 1,\n u\"\\ue618\": 1,\n u\"\\ue602\": 1,\n u\"\\ue605\": 2,\n u\"\\ue610\": 2,\n u\"\\ue617\": 2,\n u\"\\ue611\": 3,\n u\"\\ue604\": 3,\n u\"\\ue61a\": 3,\n u\"\\ue606\": 4,\n u\"\\ue619\": 4,\n u\"\\ue60c\": 4,\n u\"\\ue60f\": 5,\n u\"\\ue607\": 5,\n u\"\\ue61b\": 5,\n u\"\\ue61f\": 6,\n u\"\\ue612\": 6,\n u\"\\ue608\": 6,\n u\"\\ue61c\": 7,\n u\"\\ue60a\": 7,\n u\"\\ue613\": 7,\n u\"\\ue60b\": 8,\n u\"\\ue61d\": 8,\n u\"\\ue614\": 8,\n u\"\\ue615\": 9,\n u\"\\ue61e\": 9,\n u\"\\ue609\": 9,\n 
\"w\": \"w\",\n \".\": \".\"\n }\n _li = list()\n for i in lists:\n if str(i).strip():\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = i.replace(u'', \"\").strip()\n i = pat.get(i, i)\n _li.append(str(i))\n return \"\".join(_li)\n\n\nif __name__ == '__main__':\n uids = [\n \"57720812347\", \"93046013277\", \"72096309936\", \"60637177764\",\n \"69914084602\", \"72722865756\", \"58486060366\", \"95433824498\",\n \"77267568314\", \"52616983119\", \"61141281259\", \"58900737309\"\n ]\n # uids = [\"84990209480\"]\n # for uid in uids:\n # fetch_info(uid)\n\n for i in list(range(1, 100)):\n fetch_info(i)\n # fetch_info(50)\n\nfile.close()\n","repo_name":"chandchen/net-pandas","sub_path":"douyin.py","file_name":"douyin.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35461399672","text":"import torch\nimport torch.optim as optim\nfrom actor_critic import Actor, Critic\n\n\nclass Agent:\n def __init__(self, args, state_size, action_size):\n n_in, n_mid, n_out = state_size, 50, action_size\n self.args = args\n self.actor = Actor(n_in, n_mid, n_out)\n self.critic = Critic(n_in, n_mid)\n self.actor_optim = optim.Adam(self.actor.parameters(), lr=args.lr_actor)\n self.critic_optim = optim.Adam(self.critic.parameters(), lr=args.lr_critic)\n\n def get_action_prob(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0) # state : [1, 4]\n policy = self.actor(state)\n action = policy.multinomial(num_samples=1)\n action = action.item()\n log_prob = torch.log(policy.squeeze(0)[action])\n\n return action, log_prob\n\n def get_v_value(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0) # state : [1, 4]\n v_value = self.critic(state)\n\n return v_value\n\n def update_critic(self, v_value, reward, next_v_value):\n with torch.no_grad():\n td_target = reward + self.args.gamma * next_v_value\n advantage = td_target - v_value\n\n loss_critic = 0.5 * (td_target - v_value) ** 2\n\n self.critic_optim.zero_grad()\n loss_critic.backward()\n self.critic_optim.step()\n\n return advantage, loss_critic.item()\n\n def update_actor(self, log_prob, advantage):\n loss_actor = -log_prob * advantage\n\n self.actor_optim.zero_grad()\n loss_actor.backward()\n self.actor_optim.step()\n\n return loss_actor.item()","repo_name":"pyCERN/RL_Algorithms","sub_path":"A2C/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31026049519","text":"import asyncio\nimport signal\nfrom contextlib import suppress\n\nimport pulsectl_asyncio\n\n\nasync def listen(pulse: pulsectl_asyncio.PulseAsync, source_name: str):\n async for level in pulse.subscribe_peak_sample(source_name, rate=5):\n print('\\x1b[2K\\x1b[0E', end='') # return to beginning of line\n num_o = round(level * 80)\n print('O' * num_o + '-' * (80-num_o), end='', flush=True)\n\n\nasync def main():\n \"\"\"\n Monitor output level of the default sink.\n \"\"\"\n async with pulsectl_asyncio.PulseAsync('peak-listener') as pulse:\n # Get name of monitor_source of default sink\n server_info = await pulse.server_info()\n default_sink_info = await pulse.get_sink_by_name(server_info.default_sink_name)\n source_name = default_sink_info.monitor_source_name\n\n # Start listening/monitoring task\n listen_task = loop.create_task(listen(pulse, source_name))\n\n # Schedule listen_task to be cancelled after 10 
seconds\n # Alternatively, the PulseAudio event subscription can be ended by breaking/returning from the `async for` loop\n loop.call_later(5, listen_task.cancel)\n\n # register signal handlers to cancel listener when program is asked to terminate\n for sig in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):\n loop.add_signal_handler(sig, listen_task.cancel)\n\n with suppress(asyncio.CancelledError):\n await listen_task\n print()\n\n\n# Run event loop until main_task finishes\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","repo_name":"mhthies/pulsectl-asyncio","sub_path":"examples/subscribe_peak_example.py","file_name":"subscribe_peak_example.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"47"} +{"seq_id":"22174456716","text":"from scipy.spatial import distance\nfrom imutils import face_utils\nimport playsound\nimport imutils\nimport dlib\nimport cv2\n\n# Fungsi untuk menghitung aspek rasio mata(EAR)\ndef eye_aspect_ratio(eye):\n a = distance.euclidean(eye[1], eye[5])\n b = distance.euclidean(eye[2], eye[4])\n c = distance.euclidean(eye[0], eye[3])\n ear = (a + b) / (2 * c)\n return ear\n\n# Fungsi untuk menyalakan alarm\ndef alarm():\n playsound.playsound(\"tone.mp3\")\n\n# Batas EAR\near_treshold = 0.18\n# Batas jumlah frame setelah melewati batas EAR\nframe_counter_tresh = 20\nframe_counter = 0\n\n# Menggunakan modul pendeteksi yang sudah dilatih dari file (.xml/.dat)\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\ndetect = dlib.get_frontal_face_detector()\npredict = dlib.shape_predictor(\"models/shape_predictor_68_face_landmarks.dat\")\n\n# Mengamil index landmark wajah pada bagian mata kanan dan kiri\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"right_eye\"]\n\n# Memulai pengambilan gambar\ncap = cv2.VideoCapture(1)\n\n# Loop yang dijalankan pada setiap frame\nwhile True:\n # Mengambil data dari kamera, lalu mengubahnya menjadi grayscale\n ret, frame = cap.read()\n height = int(cap.get(4))\n width = int(cap.get(3))\n frame = imutils.resize(frame, width=720)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # mendeteksi wajah pada frame grayscale\n sbj = detect(gray, 0)\n\n # Loop pendeteksi wajah\n for subject in sbj:\n # Konversi koordinat titik wajah menjadi NumPy array\n shape = predict(gray, subject)\n shape = face_utils.shape_to_np(shape)\n # Mendapatkan koordinat mata kanan dan kiri\n left = shape[lStart:lEnd]\n right = shape[rStart:rEnd]\n leftEAR = eye_aspect_ratio(left)\n rightEAR = eye_aspect_ratio(right)\n ear = (leftEAR + rightEAR) / 2.0\n # Menggambarkan garis di sekeliling mata berdasarkan koordinat\n leftHull = cv2.convexHull(left)\n rightHull = cv2.convexHull(right)\n cv2.drawContours(frame, [leftHull], -1, (255,255,255), 1)\n cv2.drawContours(frame, [rightHull], -1, (255,255,255), 1)\n # Pendeteksi wajah\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h ), (0, 255, 0), 2)\n\n # Mengubah kotak deteksi menjadi merah\n if frame_counter >= frame_counter_tresh:\n cv2.rectangle(frame, (x, y), (x + w, y + h ), (0, 0, 255), 2)\n\n # Membatasi ear dengan ear_treshold\n if ear < ear_treshold:\n # Menghitung frame jika melewati batas/tresholdq\n frame_counter += 1\n print(frame_counter, ear)\n\n # Kondisi menambahkan peringatan berupa text\n if frame_counter 
>= frame_counter_tresh:\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, \"************************ HEY BANGUN ************************\", (10, 30),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n # Kondisi untuk menyalakan alarm\n if frame_counter > 23:\n print(\"====!!!warning!!!====\")\n alarm()\n else:\n frame_counter = 0\n # Menampilkan gambar yang sudah diproses\n cv2.imshow(\"Pendeteksi Kesadaran Pengendara\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # Kondisi untuk keluar dari program\n if key == ord(\"q\"):\n break\n# Membersihkan sisa dari jalannya program\ncv2.destroyAllWindows()\ncap.release()","repo_name":"mzakiwidianto/TUBES_ALPRO","sub_path":"Tugas_Besar.py","file_name":"Tugas_Besar.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23075738525","text":"\n\nimport numpy as np\nimport time\n\nfrom pmdarima.arima import ARIMA\nfrom BHT_ARIMA.util.utility import get_acc, nrmse\n\ndef run_ARIMA(data, param):\n\n\n order = param['order'] \n testsize = param['testsize']\n\n T = data.shape[-1]\n T_test = int((T * testsize) // 1)\n result_full = np.zeros([data.shape[0], T_test])\n\n total_time = 0\n n_round = 0\n\n\n for i in range(T_test):\n\n y = data[..., i:T-T_test+i].copy()\n n_round += 1\n start = time.time()\n\n for j in range(y.shape[0]):\n \n model = ARIMA(order , suppress_warnings = True,enforce_stationarity=True)\n result = model.fit_predict(y[j], n_periods=1)\n result_full[j, i] = result[..., -1]\n\n end = time.time()\n total_time = total_time + (end - start)\n\n true_value = data[..., -T_test:]\n\n\n stat = {}\n stat['acc'] = get_acc(result_full, true_value)\n stat['nrmse'] = nrmse(result_full, true_value)\n stat['ave_time'] = total_time/n_round\n\n\n return(stat)\n \n \n","repo_name":"shtepkaa/ML_2020_project_Group-30","sub_path":"models/model_ARIMA.py","file_name":"model_ARIMA.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"19002691168","text":"class Solution:\n def sumSubseqWidths(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n MOD = 1000000007\n memo = {0: 0}\n\n def choose(i):\n if i in memo:\n return memo[i]\n else:\n out = ((choose(i - 1) + 1) * 2 - 1) % MOD\n memo[i] = out\n return out\n\n l = len(A)\n d = collections.defaultdict(int)\n for e in A:\n d[e] += 1\n\n A2 = sorted([(v, c) for v, c in d.items()])\n pre = 0\n out = 0\n for v, c in A2:\n out = (out + (choose(pre) - choose(l - pre - c)) * v * choose(c)) % MOD\n pre += c\n return out\n","repo_name":"devilhtc/leetcode-solutions","sub_path":"0x037b_891.Sum_of_Subsequence_Widths/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3715445232","text":"#The computer will think of a random number from 1 to 10 as secret number.\r\nimport random\r\nrandom_number=random.randint(1,10)\r\n\r\n#Then ask you ( Player ) to guess the number and store as guess number.\r\nguessed_number=input('Guess any number between 1 to 10: ')\r\nguessed_number1=int(guessed_number)\r\n\r\n#Compare the guess number with the secret number.\r\nif (guessed_number1==random_number):\r\n #If the player guesses the right number he wins, so print player wins and computer lose.\r\n print('player wins and computer lose')\r\n print('Congrats!')\r\n\r\nelse:\r\n #If 
the player guesses the wrong number, then he loses so print player lose and computer wins.\r\n print('player lose and computer wins')\r\n print('Oh! Try Again.')\r\n","repo_name":"AmanMudgal2701/Second-Python-Project","sub_path":"Day - 5 (Number game).py","file_name":"Day - 5 (Number game).py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27150168463","text":"from django import forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django.urls import reverse_lazy\nfrom datetime import datetime, timedelta\nfrom . import gCalendar\n\npersons = [\n (1, \"All\"),\n (2, \"Jamie\"),\n (3, \"Chris\"),\n (4, \"Josey\"),\n (5, \"Toby\"),\n (6, \"Other\"),\n]\n\n\nclass startForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = \"POST\"\n self.helper.add_input(Submit(\"submit\", \"Submit\", css_class=\"btn btn-success\"))\n self.furthestEvent = datetime.strptime(\n gCalendar.furthestEvent(), \"%Y-%m-%d %H:%M:%S\"\n )\n self.furthestEvent = self.furthestEvent.date()\n self.fields[\"start_planning_from\"].widget.attrs.update(\n {\n \"min\": self.furthestEvent + timedelta(days=1),\n }\n )\n self.helper.attrs.update({\"id\": \"start_form_id\"})\n\n start_planning_from = forms.DateField(\n widget=forms.DateInput(\n attrs={\n \"type\": \"date\",\n \"style\": \"max-width: 150px\",\n }\n )\n )\n\n\nclass MealForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = \"POST\"\n self.helper.add_input(Submit(\"submit\", \"Submit\", css_class=\"btn btn-success\"))\n self.helper.attrs.update({\"id\": \"meal_form_id\"})\n\n meal = forms.CharField(required=True)\n who_is_eating = forms.MultipleChoiceField(\n required=True, choices=persons, widget=forms.CheckboxSelectMultiple()\n )\n other = forms.CharField(label=False, required=False)\n\n\nclass EditMealForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = \"POST\"\n self.helper.add_input(Submit(\"submit\", \"Submit\", css_class=\"btn btn-success\"))\n self.helper.attrs.update({\"id\": \"meal_form_id\"})\n\n meal = forms.CharField(required=True)\n who_is_eating = forms.MultipleChoiceField(\n required=True, choices=persons, widget=forms.CheckboxSelectMultiple()\n )\n other = forms.CharField(label=False, required=False)\n\n\nclass deleteMeal(forms.Form):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = \"POST\"\n self.helper.attrs.update({\"id\": \"delete_form_id\"})\n self.helper.add_input(Submit(\"submit\", \"Delete\", css_class=\"btn btn-danger\"))\n\n confirm = forms.BooleanField(\n error_messages={\"required\": \"You must confirm the deletion\"},\n label=\"Confirm\",\n )\n","repo_name":"xhemals/MealPlanning","sub_path":"MealPlanning/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2997409054","text":"import os\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nld = {}\nif os.path.exists(\"README.md\"):\n ld['filename'] = \"README.md\"\n ld['content_type'] = \"text/markdown\"\nelif 
os.path.exists(\"readme_src.org\"):\n ld['filename'] = \"readme_src.org\"\n ld['content_type'] = \"text/plain\"\n\nwith open(file=ld['filename'], mode=\"r\") as readme_f:\n ld['data'] = readme_f.read()\n\nsetup(\n\n # Metadata\n name=\"fabular\",\n author=\"Philipp Denzel\",\n author_email=\"phdenzel@gmail.com\",\n version=\"0.0.dev2\",\n description=(\"A command-line chat app for secure communication \"\n \"between you and your friends!\"),\n long_description=ld['data'],\n long_description_content_type=ld['content_type'],\n license='GNU General Public License v3.0',\n url=\"https://github.com/phdenzel/fabular\",\n keywords=\"command line, chat, secure, encryption, server, client\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: POSIX',\n 'Environment :: Console',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Communications',\n 'Topic :: Communications :: Chat',\n 'Topic :: Security',\n ],\n\n # Package\n install_requires=['cryptography', 'pyngrok'],\n package_dir={\"\": \"src\"},\n packages=find_packages(where='src'),\n py_modules=['fabular'],\n python_requires=\">=3.6\",\n entry_points={\n 'console_scripts': [\n 'fabular = fabular.__main__:main',\n ],\n },\n\n)\n","repo_name":"phdenzel/fabular","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25018760349","text":"import os\nimport logging as log\nimport json\nimport boto3\n\n\ndef append_data_to_s3(existing_data, new_data, source):\n s3_client = boto3.client('s3')\n updated_data = existing_data + new_data # You may need to format this data as per your requirements\n s3_client.put_object(Bucket=os.environ['BUCKET_NAME'], Key=source, Body=updated_data)\n\ndef lambda_handler(event, context):\n\n log.info(event, context)\n\n sqs_client = boto3.client(\"sqs\")\n s3_client = boto3.client(\"s3\")\n\n queue_url = os.environ[\"QUEUE_URL\"]\n\n # Receive message from SQS queue\n response = sqs_client.receive_message(\n QueueUrl=queue_url,\n AttributeNames=[\"SentTimestamp\"],\n MaxNumberOfMessages=1,\n MessageAttributeNames=[\"All\"],\n VisibilityTimeout=0,\n WaitTimeSeconds=0,\n )\n\n if 'Messages' in response:\n for message in response['Messages']:\n # Extract the new data from the SQS message\n new_data = json.loads(message['Body'])\n \n log.info(f'new data just arrived: {new_data}')\n # Get the existing data from S3\n existing_object = s3_client.get_object(Bucket=os.environ[\"BUCKET_NAME\"], Key=os.environ[\"S3_KEY\"])\n existing_data = existing_object['Body'].read().decode('utf-8')\n\n # Append new data to the existing data\n append_data_to_s3(existing_data, new_data, os.environ[\"S3_KEY\"])\n\n # Delete the SQS message to remove it from the queue\n sqs_client.delete_message(\n QueueUrl=os.environ['QUEUE_URL'],\n ReceiptHandle=message[\"ReceiptHandle\"]\n )\n\n\n \n\n\n\n \n \n \n \n\n\n\n","repo_name":"pklaudat/data-pipeline-aws-sqs-infra","sub_path":"lambdas/ingest_data/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1105074755","text":"import 
","repo_name":"willdzeng/ticket_to_ride","sub_path":"logging/csv_log.py","file_name":"csv_log.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
 +{"seq_id":"20166670220","text":"\"\"\"\nParameterized quantum circuits for supervised learning\n\n@author: Vince Hasse\n@author: Martijn Swenne\nLast edited:\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport pandas as pd\nfrom math import pi\nimport numpy as np\nimport random\nimport sympy\nimport math\nimport cirq\nimport time\nimport os\n\n# Initialisation of some parameters\nseed = 239  # Random seed\nnp.random.seed(seed)  # Initialise random seed\nnr_qubits = 3  # Number of qubits\nnr_layers = 4  # Number of layers\nbatch_size = 10  # Number of datapoints per training batch\nshots = 2  # Number of shots per datapoint\niterations = 1  # Number of iterations\nfile = \"data{}.txt\"  # Base filename for writing away data\nkey = \"\"  # String that contains all qubit-keynames\nfor i in range(nr_qubits):\n    key += str(i)\n\n# U_phi gate needed for fancy_U\ndef U_phi(q, W):\n    # Apply rotation on every qubit based on datapoint\n    for i in range(len(q)):\n        rot = cirq.ZPowGate(exponent=W[i]/pi)\n        yield rot(q[i])\n    # Apply controlled-rotation on every qubit-pair based on datapoint\n    for i in range(len(q)-1):\n        for j in range(i+1, len(q)):\n            rot = cirq.ZPowGate(exponent=((pi-W[i])*(pi-W[j]))/pi)\n            yield rot.on(q[j]).controlled_by(q[i])\n\n# U_phi initialises the qubits in a state based on the current datapoint\ndef fancy_U(q, W):\n    # Apply a Hadamard on every qubit\n    for i in range(len(q)):\n        yield cirq.H(q[i])\n    # Apply U_phi\n    yield U_phi(q, W)\n    # Apply a Hadamard on every qubit\n    for i in range(len(q)):\n        yield cirq.H(q[i])\n    # Apply U_phi\n    yield U_phi(q, W)\n\n# W_theta_p applies one layer of W_theta\ndef W_theta_p(q, theta):\n    # Apply a controlled-Z gate on every qubit pair (i,i+1) based on theta\n    for i in range(len(q)):\n        yield cirq.CZ.on(q[(i+1)%len(q)], q[i])\n    # Apply a Y and Z rotation on every qubit based on theta\n    for i in range(len(q)):\n        rot_z = cirq.ZPowGate(exponent=theta[2*i]/pi)\n
        rot_y = cirq.Ry(theta[2*i+1])\n        yield rot_z(q[i])\n        yield rot_y(q[i])\n\n# W_theta applies a mapping from the qubits in the state based on the current\n# datapoint to a quantum state that, when measured, can be mapped to a label\ndef W_theta(q, theta, layers):\n    # Apply a Z and Y rotation on every qubit based on theta\n    for i in range(len(q)):\n        rot_z = cirq.ZPowGate(exponent=theta[2*i]/pi)\n        rot_y = cirq.Ry(theta[2*i + 1])\n        yield rot_z(q[i])\n        yield rot_y(q[i])\n    # Apply \"layers\" amount of layers using W_theta_p\n    for i in range(1, layers+1):\n        yield W_theta_p(q, theta[range(6*(i), 6*(i+1))])\n\n# Measures all qubits\ndef measure(q):\n    for i in range(len(q)):\n        yield cirq.measure(q[i], key=str(i))\n\n# Builds the variational circuit\ndef circuit(q, W, theta, layers):\n    yield fancy_U(q, W)\n    yield W_theta(q, theta, layers)\n    yield measure(q)\n\n# Returns the absolute loss of the predictions\ndef abs_loss(labels, predictions):\n    loss = 0\n    pred = np.round(predictions)\n    for l, p in zip(labels, pred):\n        loss = loss + np.abs(l - p)\n    loss = loss / len(labels)\n    return loss\n\n# Returns the squared loss of the predictions\ndef squared_loss(labels, predictions):\n    loss = 0\n    for l, p in zip(labels, predictions):\n        loss = loss + (l - p)**2\n    loss = loss / len(labels)\n    return loss\n\n# Returns the accuracy over the predicted labels of a dataset\ndef accuracy(labels, predictions):\n    loss = 0\n    for l, p in zip(labels, predictions):\n        if abs(l - p) < 1e-5:\n            loss = loss + 1\n    loss = loss / len(labels)\n    return loss\n\n# Returns a probability of seeing label 1 for a datapoint\ndef probability_estimate(results):\n    counter = results.multi_measurement_histogram(keys=\"012\")\n    p_hold = 0\n    for j in counter:\n        if j.count(1) % 2 == 1:\n            p_hold += counter[j]\n    return p_hold/shots\n\n# Assigns +/-1 labels from the estimated probabilities, biased by b\ndef assign_label(p, Y, b):\n    Y_pm = 2 * Y - 1\n    labels = np.ones([len(p),])*-1\n    for i in range(len(p)):\n        if (p[i] > ((1 - p[i]) - b)):\n            labels[i] = 1\n    return labels\n\n# Sigmoid-smoothed empirical loss over the estimated label probabilities\ndef R(y, probs, b):\n    p = 1 - probs\n    y = 2*y - 1\n    loss = 0\n    R = 200\n    for k in range(len(y)):\n        if y[k] == 1:\n            x = (math.sqrt(R)*(.5 - (probs[k] - y[k]*(b/2))))/math.sqrt(2*probs[k]*p[k])\n        else:\n            x = (math.sqrt(R)*(.5 - (p[k] - y[k]*(b/2))))/math.sqrt(2*probs[k]*p[k])\n        loss = loss + (1 / (1 + math.exp(-x)))\n    loss = loss / len(probs)\n    return loss\n\n# Runs the variational circuit for each datapoint and estimates the\n# probability of label 1\ndef J_w(theta, X, qubits, nr_layers, shots):\n    simulator = cirq.Simulator()\n    p = np.zeros([len(X),])\n    for i in range(len(X)):\n        c = cirq.Circuit()\n        c.append(circuit(qubits, X[i], theta, nr_layers))\n        results = simulator.run(c, repetitions=shots)\n        p[i] = probability_estimate(results)\n    return p\n\ndef calibration():\n    stat = 25\n    hold_c0 = parameters[0]\n    initial_c = parameters[1]\n    delta_obj = 0\n    for i in range(stat):\n        print(i)\n        delta = 2 * np.random.randint(2, size=len(theta)) - 1\n        obj_plus = J_w(theta+initial_c*delta, X_t, qubits, nr_layers, shots)\n        obj_minus = J_w(theta-initial_c*delta, X_t, qubits, nr_layers, shots)\n        loss_p = squared_loss(Y_t, obj_plus)\n        loss_m = squared_loss(Y_t, obj_minus)\n        delta_obj += np.absolute(loss_p - loss_m) / stat\n\n    #c_new = hold_c0 * 2 / delta_obj * initial_c
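\n\n# The training loop in main() below uses an SPSA-style estimator: two loss\n# evaluations at theta +/- c*delta yield an estimate of the full gradient.\n# A minimal sketch of that estimator on a toy quadratic (toy_loss and\n# spsa_step are illustrative stand-ins, not used elsewhere in this file):\ndef toy_loss(t):\n    return np.sum((t - 1.0) ** 2)\n\ndef spsa_step(t, a_n=0.1, c_n=0.01):\n    # Random +/-1 perturbation of every parameter at once\n    delta = 2 * np.random.randint(2, size=t.shape) - 1\n    # Central difference along delta, divided elementwise by delta\n    grad = (toy_loss(t + c_n * delta) - toy_loss(t - c_n * delta)) / (2 * c_n) / delta\n    return t - a_n * grad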
\n\n# Helpful function that shows a progress bar\ndef printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):\n\t\"\"\"\n\tCall in a loop to create terminal progress bar\n\t@params:\n\t\titeration - Required : current iteration (Int)\n\t\ttotal     - Required : total iterations (Int)\n\t\tprefix    - Optional : prefix string (Str)\n\t\tsuffix    - Optional : suffix string (Str)\n\t\tdecimals  - Optional : positive number of decimals in percent complete (Int)\n\t\tlength    - Optional : character length of bar (Int)\n\t\tfill      - Optional : bar fill character (Str)\n\t\"\"\"\n\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n\tfilledLength = int(length * iteration // total)\n\tbar = fill * filledLength + '-' * (length - filledLength)\n\tprint('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\n\t# Print New Line on Complete\n\tif iteration == total:\n\t\tprint()\n\n# Reads the data from a file\ndef read_from_file(filename):\n    with open(filename, \"r\") as f:\n        f.readline()\n        nr_qubits = int(f.readline().split(\" \")[-1])\n        nr_layers = int(f.readline().split(\" \")[-1])\n        batch_size = int(f.readline().split(\" \")[-1])\n        shots = int(f.readline().split(\" \")[-1])\n        iterations = int(f.readline().split(\" \")[-1])\n        seed = int(f.readline().split(\" \")[-1])\n        Tot_Loss = []\n        f.readline()\n        f.readline()\n        for i in range(iterations):\n            Tot_Loss.append(float(f.readline().strip()))\n        theta = []\n        f.readline()\n        for i in range((nr_qubits*2)*(nr_layers+1)):\n            theta.append(float(f.readline().strip()))\n    return nr_qubits, nr_layers, batch_size, shots, iterations, seed, Tot_Loss, theta\n\n# Writes data to a file\ndef write_to_file(nr_qubits, nr_layers, batch_size, shots, iterations, seed, Tot_Loss, theta):\n    # Open a new file with the first unused counter value\n    counter = 0\n    filename = file\n    while os.path.isfile(filename.format(counter)):\n        counter += 1\n    filename = filename.format(counter)\n    # Write Params, Tot_Loss and end_theta to file\n    with open(filename, \"w+\") as f:\n        f.write(\"RUN PARAMS:\\n\")\n        f.write(\"\\tnr_qubits: %d\\n\" % nr_qubits)\n        f.write(\"\\tnr_layers: %d\\n\" % nr_layers)\n        f.write(\"\\tbatch_size: %d\\n\" % batch_size)\n        f.write(\"\\tshots: %d\\n\" % shots)\n        f.write(\"\\titerations: %d\\n\" % iterations)\n        f.write(\"\\tseed: %d\\n\" % seed)\n        f.write(\"\\tRESULTS:\\n\")\n        f.write(\"\\t\\tTot_Loss:\\n\\t\\t\\t\")\n        f.write(\"\\n\\t\\t\\t\".join(str(elem) for elem in Tot_Loss))\n        f.write(\"\\n\\t\\teind_theta:\\n\\t\\t\\t\")\n        f.write(\"\\n\\t\\t\\t\".join(str(elem) for elem in theta))\n\n# Main function which runs the variational classifier\ndef main():\n    # Set up qubit register\n    qubits = [cirq.GridQubit(i, 0) for i in range(nr_qubits)]\n\n    # Load the data and split parameters and labels\n    df = pd.read_csv(\"QA_data_x.csv\")\n    X = df.iloc[:, :3].to_numpy()\n    Y = df.iloc[:, 3].to_numpy()\n\n    # Initialise training data\n    i_t = random.sample(range(len(X)), int(np.round((4/5)*len(X))))  # Get the training indexes (a sample of the data)\n    X_t = X[i_t]  # Get the training parameters\n    Y_t = Y[i_t]  # Get the training labels\n    i_s = [i for i in range(len(X)) if i not in i_t]  # Get the test indexes\n    X_s = X[i_s]  # Get the test parameters\n    Y_s = Y[i_s]  # Get the test labels\n#    indexes = np.array(range(len(X)))\n#    m = int(np.round((4/5)*len(X)))\n#    train = np.random.choice(len(X), m, replace=False)\n#    test = indexes[~np.isin(indexes,train)]\n#    X_t = X[train,:]\n#    Y_t = Y[train]\n\n    # Initialise theta\n    nr_par = (nr_qubits*2)*(nr_layers+1)\n    init_theta = np.random.rand(nr_par,)*(2*pi)\n    b = 0\n    theta = np.append(init_theta, b)\n\n    # Initialise classifier parameters\n    eye = np.eye(nr_par)\n    a = 2.5\n    c = 0.1\n    alpha = 0.602\n    gamma = 0.101\n    parameters = np.array([a, c, alpha, gamma])\n    batch_ix = np.array(range(len(X_t)))
\n    plot_ix = 5\n    P = int(iterations/plot_ix)\n    tot_loss = np.zeros(P)\n    Tot_Loss = np.zeros(iterations)\n    z = a/pi\n    loss_est = 0\n    iw = 0\n\n    # Start progress bar\n    start = time.time()\n    printProgressBar(0, iterations, prefix='Progress:', suffix='Complete', length=50)\n    # Start iterations\n    for k in range(1, iterations+1):\n        # Update parameters\n        #batch_ix = np.random.randint(0, len(X_t), (batch_size,))\n        c_n = c/(k**(gamma))\n        a_n = a/(k**(alpha))\n        z_n = z/(k**(alpha))\n        gradient = np.zeros(nr_par)\n        delta_n = 2*np.random.randint(2, size=nr_par+1) - 1\n\n        # Run variational classifier with theta+delta and theta-delta\n        p_plus = J_w(theta+c_n*delta_n, X_t, qubits, nr_layers, shots)\n        p_minus = J_w(theta-c_n*delta_n, X_t, qubits, nr_layers, shots)\n        # Calculate the loss for each run\n        loss_plus = R(Y_t, p_plus, theta[-1])\n        loss_minus = R(Y_t, p_minus, theta[-1])\n\n        # Compute gradient and update theta accordingly\n        grad = ((loss_plus - loss_minus)/(2*c_n))/delta_n\n        theta[1:-1] = (theta[1:-1] - a_n*grad[1:-1])  #% (2*pi)\n        theta[-1] = (theta[-1] - z_n*grad[-1])\n        # parameter b is probably taking too large steps.\n\n        # Save average loss for plotting\n        Tot_Loss[k-1] = (loss_plus + loss_minus)/2\n        # Add step to progress bar\n        printProgressBar(k, iterations, prefix='Progress:', suffix='Complete', length=50)\n\n    # Finish progress bar\n    printProgressBar(iterations, iterations, prefix='Progress:', suffix='Complete', length=50)\n    end = time.time()\n    # Print time taken for all iterations\n    print(end - start)\n\n    # Plot average loss per iteration over all iterations\n    fig = plt.figure(figsize=(15, 10))\n    plt.plot(range(1, iterations+1), Tot_Loss, 'g-', markersize=2)\n\n    write_to_file(nr_qubits, nr_layers, batch_size, shots, iterations, seed, Tot_Loss, theta)\n\n# Start main\nmain()","repo_name":"MSwenne/QuantumProject","sub_path":"Jan6_VariationalClassifier.py","file_name":"Jan6_VariationalClassifier.py","file_ext":"py","file_size_in_byte":11862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"42611137142","text":"# This script extracts ROIs around manually annotated fibers, for use in later training.\n\nimport numpy as np\nimport skimage.measure as sm\nimport data\nimport math\nimport sys\n\ntomogramfile = sys.argv[1]\nannotationfile = sys.argv[2]\noutputtomogram = sys.argv[3]\noutputannotation = sys.argv[4]\n\n# Read annotation and tomogram from disk\nann = data.read_tomogram(annotationfile, domean=False)\nrec = data.read_tomogram(tomogramfile, domean=False)\nprint(ann.shape)\nprint(rec.shape)\nprint(ann.min(), ann.max())\n\n# Determine bounds of annotated parts\nmnsx = []\nmnsy = []\nmxsx = []\nmxsy = []\nzis = []\nfor i in range(ann.shape[0]):\n    if ann[i].max() > 0:\n        zis.append(i)\n        cont = sm.find_contours(ann[i], 0.05)\n        for c in cont:\n            mnsx.append(c[:, 0].min())\n            mnsy.append(c[:, 1].min())\n            mxsx.append(c[:, 0].max())\n            mxsy.append(c[:, 1].max())\nmnx = int(min(mnsx))\nmny = int(min(mnsy))\nmxx = int(math.ceil(max(mxsx)))\nmxy = int(math.ceil(max(mxsy)))\nprint(mnx, mxx, mny, mxy)\n\n# Extract annotated parts with padding\npd = 10\ninp = rec[min(zis):max(zis)+1, mnx-pd:mxx+pd, mny-pd:mxy+pd]\ntar = ann[min(zis):max(zis)+1, mnx-pd:mxx+pd, mny-pd:mxy+pd]\ntar = (tar > 0).astype(np.uint8)\n\n# Save extracted ROI\nimport tifffile\ntifffile.imsave(outputtomogram, inp)\n
tifffile.imsave(outputannotation, tar)\n","repo_name":"dmpelt/jumbo-bacteriophage","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"14035799847","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        q = head\n        # Swap values pairwise instead of relinking nodes\n        while q and q.next:\n            q.val, q.next.val = q.next.val, q.val\n            q = q.next.next\n        return head","repo_name":"Mihretthe/Competitive-Programming","sub_path":"0024-swap-nodes-in-pairs/0024-swap-nodes-in-pairs.py","file_name":"0024-swap-nodes-in-pairs.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
 +{"seq_id":"2875412026","text":"def color_plan(n, m):\n    if n > m:\n        return \"impossible\"\n    if m >= n:\n        # Count ordered choices of n distinct colors: m * (m-1) * ... * (m-n+1)\n        temp_m = m\n        result = 1\n        for _ in range(n):\n            result *= temp_m\n            temp_m -= 1\n        return result\n\n    # Unreachable with the early returns above; kept from the original\n    # recursive formulation.\n    if n == 1:\n        return m\n    return m * (m - 1) ** (n - 1) - color_plan(n - 1, m)\n\n\nprint(color_plan(2, 2))\n","repo_name":"Michael-Pan95/Algorithm_Python","sub_path":"color_plan.py","file_name":"color_plan.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
 +{"seq_id":"72455577102","text":"import numpy as np\nimport copy\nimport subprocess\nimport csv\n\n\ndef calculate_histogram(sequence: list):\n    histogram = np.array([0 for i in range(256)])\n    for x in sequence:\n        histogram[x] += 1\n    return histogram\n\n\ndef calculate_normalized_histogram(sequence: list):\n    histogram = np.array([0 for i in range(256)])\n    for x in sequence:\n        histogram[x] += 1\n    return histogram / len(sequence)\n\n\ndef append_bytes_given_target_histogram(byte_sequence, original_histogram, target_histogram):\n    target_byte_sequence = copy.deepcopy(byte_sequence)\n    for i in range(original_histogram.shape[0]):\n        diff = target_histogram[i] - original_histogram[i]\n        for j in range(diff):\n            target_byte_sequence.append(i)\n    return target_byte_sequence\n\n\ndef append_bytes_given_target_bytes(original_byte_sequence: list, bytes_to_append: tuple):\n    target_byte_sequence = copy.deepcopy(original_byte_sequence)\n    for tup in bytes_to_append:\n        for j in range(tup[1]):\n            target_byte_sequence.append(tup[0])\n    return target_byte_sequence\n\n\ndef heuristic_approach(original_byte_sequence, original_normalized_histogram, target_normalized_histogram):\n    original_histogram = calculate_histogram(original_byte_sequence)\n    target_length = int(max([original_histogram[i]/target_normalized_histogram[i] for i in range(original_histogram.shape[0])]))\n    resulting_histogram = np.array([int(target_length*target_normalized_histogram[i]) for i in range(original_histogram.shape[0])])\n    return resulting_histogram, resulting_histogram / target_length\n\n\ndef create_byte_sizes_file(byte_histogram: np.array, output_filepath: str):\n    with open(output_filepath, \"w\") as output_file:\n        for i, value in enumerate(byte_histogram.tolist()):\n            output_file.write(\"{} {}\\n\".format(i, value))\n\n\ndef create_ratios_file(normalized_byte_histogram: np.array, output_filepath: str):\n    with open(output_filepath, \"w\") as output_file:\n        for i, value in enumerate(normalized_byte_histogram.tolist()):\n            output_file.write(\"{} {}\\n\".format(i, value))\n\n\ndef run_solver(gap=0.001):\n    print(\"Gap: \", gap)\n    supported_gaps = (0.01, 0.008, 0.005, 0.003, 0.001, 0.0008, 0.0005, 0.0003, 0.0001)\n    if gap not in supported_gaps:\n        raise ValueError(\"Unsupported gap: {}\".format(gap))\n    # Each supported gap has a matching helper script, e.g. run-solver-gap0.001.sh\n    result = subprocess.run([\"sh\", \"run-solver-gap{}.sh\".format(gap)])\n    print(result.returncode)\n\n\ndef read_solution(solution_filepath: str):\n    solution = []\n    with open(solution_filepath, \"r\") as solution_file:\n        reader = csv.reader(solution_file, delimiter='\\t')\n        for row in reader:\n            solution.append((int(row[0].split(\"#\")[-1]), int(float(row[1]))))\n    return solution
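\n\n\n# A small self-contained check of the helpers above (the byte values are\n# arbitrary illustrative data). Note that append_bytes_given_target_bytes\n# consumes (byte, count) tuples -- exactly the format read_solution returns:\nif __name__ == \"__main__\":\n    seq = [0, 0, 1, 255]\n    hist = calculate_histogram(seq)\n    assert hist[0] == 2 and hist[1] == 1 and hist[255] == 1\n    padded = append_bytes_given_target_bytes(seq, [(1, 2), (7, 1)])\n    assert padded == [0, 0, 1, 255, 1, 1, 7]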
\n","repo_name":"danielgibert/adv_mlw_examples_generation_with_gans","sub_path":"src/gan_implementations/solver/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
 +{"seq_id":"27495290022","text":"'''\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nQuestion: Treasure Island I\nYou have a map that marks the location of a treasure island.\nSome of the map area has jagged rocks and dangerous reefs. Other areas are safe to sail in.\nThere are other explorers trying to find the treasure. So you must figure out a shortest route to the treasure island.\nAssume the map area is a two dimensional grid, represented by a matrix of characters.\nYou must start from the top-left corner of the map and can move one block up, down, left or right at a time.\nThe treasure island is marked as 'X' in a block of the matrix. 'X' will not be at the top-left corner.\nAny block with dangerous rocks or reefs will be marked as 'D'. You must not enter dangerous blocks. You cannot leave the map area.\nOther areas 'O' are safe to sail in.
\nThe top-left corner is always safe.\nOutput the minimum number of steps to get to the treasure.\ne.g.\nInput\n[\n['O', 'O', 'O', 'O'],\n['D', 'O', 'D', 'O'],\n['O', 'O', 'O', 'O'],\n['X', 'D', 'D', 'O']\n]\nOutput\nRoute is (0, 0), (0, 1), (1, 1), (2, 1), (2, 0), (3, 0). The minimum route takes 5 steps.\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n'''\n\nfrom collections import deque\n\n\ndef treasureIsland(matrix):\n    '''\n    # case 1 - example input\n    >>> matrixA = [['O', 'O', 'O', 'O'],['D', 'O', 'D', 'O'],['O', 'O', 'O', 'O'],['X', 'D', 'D', 'O']]\n    >>> treasureIsland(matrixA)\n    5\n\n    # case 2 - matrix with no treasure\n    >>> matrixB = [['O', 'O', 'O', 'O'],['D', 'O', 'D', 'O'],['O', 'O', 'O', 'O'],['O', 'D', 'D', 'O']]\n    >>> treasureIsland(matrixB)\n    -1\n\n    # case 3 - empty matrix 1\n    >>> matrixC = []\n    >>> treasureIsland(matrixC)\n    -1\n\n    # case 4 - empty matrix 2\n    >>> matrixD = [[]]\n    >>> treasureIsland(matrixD)\n    -1\n    '''\n    # if matrix is empty, return -1\n    if (not matrix) or (not matrix[0]):\n        return -1\n    # store the index of matrix and steps in queue\n    queue = deque()\n    queue.append([0, 0, 0])\n    while queue:\n        i, j, steps = queue.popleft()\n        # if the treasure island is found, return the steps\n        if matrix[i][j] == 'X':\n            return steps\n        # mark visited index\n        matrix[i][j] = 'D'\n        # check all the adjacent indexes\n        for x, y in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]:\n            if (0 <= x < len(matrix)) and (0 <= y < len(matrix[0])) and (matrix[x][y] != 'D'):\n                queue.append([x, y, steps + 1])\n    # if the treasure island is not found, return -1\n    return -1\n\n\nif __name__ == '__main__':\n    import doctest\n\n    doctest.testmod()","repo_name":"workprinond/DS_-_Algo_TechInterview_Practise","sub_path":"Beginning/treasure_island1.py","file_name":"treasure_island1.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"27411122669","text":"#!/usr/bin/env python3\n\n# Needed For Main\nimport os\nimport sys\nimport logging\nimport time\nimport socket\nimport hvac\n\nclass Vault:\n    shares = 5\n    threshold = 3\n    rootToken = ''\n    keys = []\n\n    def __init__(self, addr=None, keys=None):\n        if addr:\n            self.connect(addr)\n        if keys:\n            self.unseal(keys)\n\n    def wait_for_port(self, port, host='127.0.0.1', timeout=5.0):\n        logging.info('Checking If Vault Service Is Available...')\n        start_time = time.perf_counter()\n        while True:\n            try:\n                with socket.create_connection((host, port), timeout=timeout):\n                    break\n            except OSError as ex:\n                time.sleep(0.01)\n                if time.perf_counter() - start_time >= timeout:\n                    raise TimeoutError('Waited too long for the port {} on host {} to start accepting connections.'.format(port, host)) from ex\n        logging.info('Vault Service Found.')\n\n    def connect(self, addr=None):\n        urlSplit = addr.replace('/', '').split(':')\n        self.protocol = urlSplit[0]\n        self.host = urlSplit[1]\n        # A third element is only present when the address includes a port\n        if len(urlSplit) > 2:\n            self.port = urlSplit[2]\n        else:\n            self.port = '8200'\n        os.environ['no_proxy'] = self.host\n\n        self.wait_for_port(host=self.host, port=self.port)\n\n        logging.info('Connecting To Vault Client...')\n        self.client = hvac.Client(url=addr)\n        logging.info('Connected Successfully!')\n\n    def initialize(self, shares=5, threshold=3):\n        if self.client.sys.is_initialized():\n            logging.info('Vault Has Already Been Initialized.')\n        else:\n            logging.info('Vault Is Not Initialized. Initializing Now...')\n            result = self.client.sys.initialize(shares, threshold)\n            self.rootToken = result['root_token']\n            self.keys = result['keys']\n            logging.info('Vault Has Been Successfully Initialized.')\n            logging.info('ROOT TOKEN: %s', self.rootToken)\n            for num, key in enumerate(self.keys, start=1):\n                logging.info('UNSEAL KEY %s: %s', num, key)\n            logging.info('')\n            logging.info('WARNING: LOSING THESE DETAILS WILL RESULT IN LOSS OF ACCESS TO SECURE DATA!')\n        return True\n\n    def unseal(self, keys=None):\n        if self.client.sys.is_sealed():\n            if keys:\n                logging.info('Vault Is Currently Sealed. Unsealing Vault...')\n                self.client.sys.submit_unseal_keys(keys)\n                if self.client.sys.is_sealed():\n                    logging.error('The Vault Is Still Sealed. An Error Occurred During Unsealing Process!')\n                    return False\n                else:\n                    logging.info('Vault Has Been Successfully Unsealed.')\n            else:\n                logging.error('Keys Have Not Been Supplied To Unseal Vault!')\n                return False\n        else:\n            logging.info('Vault Is Already Unsealed.')\n        return True\n\n    def is_initialized(self):\n        return self.client.sys.is_initialized()\n\n    def is_sealed(self):\n        return self.client.sys.is_sealed()\n\n    def is_ready(self):\n        if self.is_initialized():\n            if self.is_sealed():\n                return False\n        else:\n            return False\n        return True
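\n\n\n# Illustrative usage (the address and key handling below are placeholder\n# examples, not part of this module):\n#\n#     vault = Vault(addr='http://127.0.0.1:8200')\n#     if not vault.is_initialized():\n#         vault.initialize(shares=5, threshold=3)\n#     vault.unseal(vault.keys)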
","repo_name":"geoffh1977/vault-sidecar","sub_path":"app/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"38793637176","text":"from Commom import *\nfrom Tools import *\n\n\nclass Focus:\n    \"\"\"\n    Handles the editor panel shown after a component gains focus\n    \"\"\"\n    instance = None\n\n    def __init__(self, win):\n        self.win = win\n        self.frame = None\n        self.frame_input = None\n        self.frame_label = None\n        self.frame_button = None\n        self.create_frame()  # build the frame containers on creation\n\n    def __new__(cls, *args, **kwargs):\n        \"\"\"\n        Singleton pattern\n        :param args:\n        :param kwargs:\n        \"\"\"\n        if cls.instance is None:\n            cls.instance = super().__new__(cls)  # super().__new__(cls) is the step that actually allocates memory for the object\n        return cls.instance\n\n    def create_frame(self):\n        \"\"\"\n        Create the frame containers\n        :return:\n        \"\"\"\n        self.frame = tk.Frame(self.win, name='障碍编辑容器')\n        self.frame.pack()\n        self.frame_button = tk.Frame(self.frame)\n        self.frame_button.pack(side=\"bottom\")\n        self.frame_label = tk.Frame(self.frame)\n        self.frame_label.pack(side=\"left\")\n        # self.frame_select = tk.Frame(self.frame)\n        # self.frame_select.pack(side=\"right\")\n        self.frame_input = tk.Frame(self.frame)\n        self.frame_input.pack(side=\"right\")\n\n    def update(self, obj, obstacle, info=None, state=None, com_info=None):\n        \"\"\"\n        Build the info panel after a component is clicked\n        :param obj: the component object (Scale-like)\n        :param obstacle: obstacle type identifier\n        :param info: contents of the input fields\n        :param state: widget states\n        :param com_info: combination obstacle info\n        :return: the input and button containers\n        \"\"\"\n        self.create_frame()\n        self.remove()\n        if obstacle == \"oxer\":\n            tk.Label(self.frame_label, text='A-->B(m):').pack()\n            var_a_b = tk.StringVar(value=info[0] if info else '')\n            a_b = Entry(self.frame_input, textvariable=var_a_b, width=5)\n            a_b.pack()\n            tk.Button(self.frame_button, text=\"确认\").pack()\n\n        elif obstacle == \"tirail\":\n            tk.Label(self.frame_label, text='A-->B(m):').pack()\n            var_a_b = tk.StringVar(value=info[0] if info else '')\n            a_b = Entry(self.frame_input, textvariable=var_a_b, width=5)\n            a_b.pack()\n            tk.Label(self.frame_label, text='B-->C(m):').pack()\n            var_b_c = tk.StringVar(value=info[1] if info else '')\n            b_c = Entry(self.frame_input, textvariable=var_b_c, width=5)\n            b_c.pack()\n            tk.Button(self.frame_button, text=\"确认\").pack()\n\n
obstacle == \"combination_ab\" or obstacle == \"combination_abc\":\n self.combination(obj, com_info, obstacle, state)\n elif obstacle == \"water\":\n self.water(info)\n elif obstacle == \"live\":\n self.live(info, obj)\n return self.frame_input, self.frame_button\n\n def live(self, info, obj):\n \"\"\"\n 利物浦聚焦输入框\n :param obj:\n :param info:\n :return:\n \"\"\"\n check = tk.StringVar(value='0')\n tk.Label(self.frame_label, text='宽(m):').pack()\n water_width_var = tk.StringVar(value=info[0] if info else '2')\n water_width_ent = Entry(self.frame_input, textvariable=water_width_var, width=5, name='water_w_ent')\n water_width_ent.pack()\n tk.Label(self.frame_label, text='长(m):').pack()\n water_height_var = tk.StringVar(value=info[0] if info else '4')\n water_height_ent = Entry(self.frame_input, textvariable=water_height_var, width=5, name='water_h_ent')\n water_height_ent.pack()\n water_height_ent.bind(\"\", water_width_ent.undo)\n\n tk.Button(self.frame_button, text=\"确认\").pack()\n Checkbutton(self.frame_button, text='双横木', variable=check, onvalue=1, offvalue=0,\n command=partial(self.live_two, obj, check)).pack()\n\n @staticmethod\n def live_two(obj, check):\n check = check.get()\n set_live(check)\n if check == '1':\n obj.img_path = live_two_tool()\n obj.img = Image.open(obj.img_path)\n obj.temp_path = ImageTk.PhotoImage(obj.img)\n obj.app.itemconfig(obj.tag, image=obj.temp_path)\n elif check == '0':\n obj.img_path = live_one_tool()\n obj.img = Image.open(obj.img_path)\n obj.temp_path = ImageTk.PhotoImage(obj.img)\n obj.app.itemconfig(obj.tag, image=obj.temp_path)\n\n def water(self, info):\n \"\"\"\n 水障聚焦框\n :param info:\n :return:\n \"\"\"\n tk.Label(self.frame_label, text='宽(m):').pack()\n water_width_var = tk.StringVar(value=info[0] if info else '3')\n water_width_ent = Entry(self.frame_input, textvariable=water_width_var, width=5)\n water_width_ent.pack()\n tk.Label(self.frame_label, text='长(m):').pack()\n water_height_var = tk.StringVar(value=info[0] if info else '4')\n water_height_ent = Entry(self.frame_input, textvariable=water_height_var, width=5)\n water_height_ent.pack()\n tk.Button(self.frame_button, text=\"确认\").pack()\n\n def combination(self, obj, info, obstacle, state):\n \"\"\"\n 组合障碍聚焦输入框\n :param obj:\n :param info:\n :param obstacle:\n :param state:\n :return:\n \"\"\"\n a = b = c = '0'\n if state:\n a = '1' if state['ent_a'] == 'normal' else '0'\n b = '1' if state['ent_b'] == 'normal' else '0'\n try:\n c = '1' if state['ent_c'] == 'normal' else '0'\n except KeyError:\n pass\n\n checkvar_a = tk.StringVar(value=a, name=\"checkvar_a\")\n checkvar_b = tk.StringVar(value=b, name=\"checkvar_b\")\n checkvar_c = tk.StringVar(value=c, name=\"checkvar_c\")\n\n var_a = tk.StringVar(value=info['ent_a'] if info else '')\n ent_a = Entry(self.frame_input, textvariable=var_a, width=5,\n state=state[\"ent_a\"] if state else \"disabled\",\n name=\"ent_a\")\n ent_a.pack()\n\n Checkbutton(self.frame_label, text=\"A双横木(cm)\", variable=checkvar_a, onvalue=1, offvalue=0,\n command=partial(self.oxer_a, checkvar_a, checkvar_b, checkvar_c, ent_a, obj, obstacle,\n var_a)).pack()\n\n tk.Label(self.frame_label, text='A-->B(m):').pack()\n var_a_b = tk.StringVar(value=info['ent_a_b'] if info else '3')\n ent_a_b = Entry(self.frame_input, textvariable=var_a_b, width=5, name=\"ent_a_b\")\n ent_a_b.pack()\n\n var_b = tk.StringVar(value=info['ent_b'] if info else '')\n ent_b = Entry(self.frame_input, textvariable=var_b, width=5,\n state=state[\"ent_b\"] if state else \"disabled\",\n name='ent_b')\n 
        ent_b.pack()\n\n        Checkbutton(self.frame_label, text=\"B双横木(cm)\", variable=checkvar_b, onvalue=1, offvalue=0,\n                    command=partial(self.oxer_b, checkvar_a, checkvar_b, checkvar_c, ent_b, obj, obstacle,\n                                    var_b)).pack()\n        if obstacle == \"combination_abc\":\n            tk.Label(self.frame_label, text='B-->C(m):').pack()\n            var_b_c = tk.StringVar(value=info['ent_b_c'] if info else '3')\n            ent_b_c = Entry(self.frame_input, textvariable=var_b_c, width=5, name=\"ent_b_c\")\n            ent_b_c.pack()\n            var_c = tk.StringVar(value=info['ent_c'] if info else '')\n            ent_c = Entry(self.frame_input, textvariable=var_c, width=5,\n                          state=state[\"ent_c\"] if state else \"disabled\",\n                          name=\"ent_c\")\n            ent_c.pack()\n            Checkbutton(self.frame_label, text=\"C双横木(cm)\", variable=checkvar_c, onvalue=1, offvalue=0,\n                        command=partial(self.oxer_c, checkvar_a, checkvar_b, checkvar_c, ent_c, obj, var_c)).pack()\n\n        tk.Button(self.frame_button, text=\"确认\").pack()\n        return checkvar_a, checkvar_b\n\n    def remove(self):\n        \"\"\"\n        Clear all widgets from the containers\n        :return:\n        \"\"\"\n        for i in self.frame_label.winfo_children():\n            i.destroy()\n        for i in self.frame_input.winfo_children():\n            i.destroy()\n        for i in self.frame_button.winfo_children():\n            i.destroy()\n        # for i in self.frame_select.winfo_children():\n        #     i.destroy()\n\n    @staticmethod\n    def oxer(x1, ent, var):\n        \"\"\"\n        :param var:\n        :param x1:\n        :param ent:\n        :return:\n        \"\"\"\n        if x1.get() == '1':\n            ent.config(state='normal')\n            # if not var.get():\n            #     print(var.get())\n            #     var.set('1')\n        elif x1.get() == '0':\n            ent.config(state='disabled')\n\n    def oxer_a(self, x1, x2, x3, ent_a, obj, obstacle, var_a):\n        \"\"\"\n        Toggle whether obstacle A is drawn as a double-rail oxer\n        :param var_a:\n        :param x1: whether obstacle A is a double-rail oxer\n        :param x2: whether obstacle B is a double-rail oxer\n        :param x3: whether obstacle C is a double-rail oxer\n        :param ent_a: the input field\n        :param obj: the component object\n        :param obstacle: AB combination or ABC combination\n        :return:\n        \"\"\"\n        self.oxer(x1, ent_a, var_a)\n        if obstacle == \"combination_abc\":\n            self.judge_abc(x1, x2, x3, obj)\n            return\n        if x1.get() == '1':\n            try:\n                if x2.get() == '1':\n                    self.com(obj)\n                    return\n                elif x2.get() == '0':\n                    pass\n            except Exception as e:\n                print(e)\n                logging.warning(e)\n            obj.img_path = oxer_obs_ab(stare_a=x1.get(), state_b=x2.get())\n            # obj.img_path = merge(5, m1=20)\n            obj.img = Image.open(obj.img_path)\n            obj.temp_path = ImageTk.PhotoImage(obj.img)\n            obj.app.itemconfig(obj.tag, image=obj.temp_path)\n        elif x1.get() == '0':\n            try:\n                if x2.get() == '1':\n                    obj.img_path = oxer_obs_ab(stare_a=x1.get(), state_b=x2.get())\n                    obj.img = Image.open(obj.img_path)\n                    obj.temp_path = ImageTk.PhotoImage(obj.img)\n                    obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                    return\n                elif x2.get() == '0':\n                    obj.img_path = merge_ab(state=1, m1=30)\n                    obj.img = Image.open(obj.img_path)\n                    obj.temp_path = ImageTk.PhotoImage(obj.img)\n                    obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                    return\n            except Exception as e:\n                print('obstacle A:', e)\n                logging.warning('obstacle A: %s', e)\n\n    def oxer_b(self, x1, x2, x3, ent_b, obj, obstacle, var_b):\n        \"\"\"\n        Toggle whether obstacle B is drawn as a double-rail oxer\n        :param var_b:\n        :param x1:\n        :param x2:\n        :param x3:\n        :param ent_b:\n        :param obj:\n        :param obstacle:\n        :return:\n        \"\"\"\n        self.oxer(x2, ent_b, var_b)\n        if obstacle == \"combination_abc\":\n            self.judge_abc(x1, x2, x3, obj)\n            return\n\n        if x2.get() == '1':\n            try:\n                if x1.get() == '1':\n                    self.com(obj)\n                    return\n                elif x1.get() == '0':\n                    pass\n            except Exception:\n                pass\n            obj.img_path = oxer_obs_ab(stare_a=x1.get(), state_b=x2.get())\n            obj.img = Image.open(obj.img_path)\n            obj.temp_path = ImageTk.PhotoImage(obj.img)\n            obj.app.itemconfig(obj.tag, image=obj.temp_path)\n        elif x2.get() == '0':\n            try:\n
                if x1.get() == '1':\n                    obj.img_path = oxer_obs_ab(stare_a=x1.get(), state_b=x2.get())\n                    obj.img = Image.open(obj.img_path)\n                    obj.temp_path = ImageTk.PhotoImage(obj.img)\n                    obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                    return\n                elif x1.get() == '0':\n                    obj.img_path = merge_ab(state=1, m1=30)\n                    obj.img = Image.open(obj.img_path)\n                    obj.temp_path = ImageTk.PhotoImage(obj.img)\n                    obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                    return\n            except Exception as e:\n                print('obstacle B:', e)\n                logging.warning('obstacle B: %s', e)\n\n    def oxer_c(self, x1, x2, x3, ent_c, obj, var_c):\n        self.oxer(x3, ent_c, var_c)\n        self.judge_abc(x1, x2, x3, obj)\n\n    @staticmethod\n    def com(obj):\n        \"\"\"\n        Used when both obstacles are double-rail oxers\n        :param obj:\n        :return:\n        \"\"\"\n        obj.img_path = merge_ab(state=2, m1=30)\n        obj.img = Image.open(obj.img_path)\n        obj.temp_path = ImageTk.PhotoImage(obj.img)\n        obj.app.itemconfig(obj.tag, image=obj.temp_path)\n\n    @staticmethod\n    def combination_abc(obj, x1, x2, x3):\n        obj.img_path = oxer_obs_ab(stare_a=x1.get(), state_b=x2.get(), state_c=x3.get(), b_c=30)\n        obj.img = Image.open(obj.img_path)\n        obj.temp_path = ImageTk.PhotoImage(obj.img)\n        obj.app.itemconfig(obj.tag, image=obj.temp_path)\n\n    def judge_abc(self, x1, x2, x3, obj):\n        try:\n            if x1.get() == '1':\n                if x2.get() == '1':\n                    if x3.get() == '1':\n                        obj.img_path = merge_ab(state=2, m1=30, m2=30)\n                        obj.img = Image.open(obj.img_path)\n                        obj.temp_path = ImageTk.PhotoImage(obj.img)\n                        obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                        return\n                    elif x3.get() == '0':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n                elif x2.get() == '0':\n                    if x3.get() == '1':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n                    elif x3.get() == '0':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n            elif x1.get() == '0':\n                if x2.get() == '1':\n                    if x3.get() == '1':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n                    elif x3.get() == '0':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n                elif x2.get() == '0':\n                    if x3.get() == '1':\n                        self.combination_abc(obj, x1, x2, x3)\n                        return\n                    elif x3.get() == '0':\n                        obj.img_path = merge_ab(state=1, m1=30, m2=30)\n                        obj.img = Image.open(obj.img_path)\n                        obj.temp_path = ImageTk.PhotoImage(obj.img)\n                        obj.app.itemconfig(obj.tag, image=obj.temp_path)\n                        return\n        except Exception as e:\n            print('obstacle ABC:', e)\n            logging.warning('obstacle ABC: %s', e)\n","repo_name":"kaliluying/Route_design","sub_path":"focus.py","file_name":"focus.py","file_ext":"py","file_size_in_byte":15111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
 +{"seq_id":"4076895151","text":"import requests\r\nimport threading\r\n\r\n\r\nclass proxyChecker:\r\n    def __init__(self):\r\n        print('ok')\r\n\r\n    def start(self, proxy):\r\n        try:\r\n            result = requests.get('https://www.instagram.com', proxies={'http': str(proxy), 'https': str(proxy)}, timeout=4)\r\n\r\n            if result.status_code == 200:\r\n                print('Good ' + proxy)\r\n                with open('good.txt', 'a+') as f:\r\n                    f.write(proxy + '\\n')\r\n            else:\r\n                return\r\n        except Exception:\r\n            return\r\n\r\n    def _thread(self):\r\n        with open('proxies.txt', 'r') as f:\r\n            for proxy in f:\r\n                proxy = proxy.replace('\\n', '')\r\n                attempting = True\r\n                while attempting:\r\n                    if threading.active_count() <= 300:\r\n                        threading.Thread(target=self.start, args=(proxy,)).start()\r\n                        attempting = False\r\n\r\n\r\nif __name__ == '__main__':\r\n    check = proxyChecker()\r\n
    check._thread()\r\n","repo_name":"fj11j/Aged-Instagram-Jacker","sub_path":"proxy_checker.py","file_name":"proxy_checker.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"}
 +{"seq_id":"33171725329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"Andres Mendez-Vazquez\"\n__copyright__ = \"Copyright 2017 Sampler Project\"\n__credits__ = [\"Andres Mendez-Vazquez\"]\n__license__ = \"GPL\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Andres Mendez-Vazquez\"\n__email__ = \"amendez@gdl.cinvestav.mx\"\n__status__ = \"Development\"\n\nimport numpy as np\nfrom HeapSort import HeapSort\n\nif __name__ == \"__main__\":\n\n    x = np.arange(10, 0, -1, dtype=np.int32)\n\n    print(x)\n\n    HObject = HeapSort()\n\n    HObject.Sort(x)\n\n    HObject.print_container()","repo_name":"kajuna0amendez/Algorithms","sub_path":"HeapSort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
 +{"seq_id":"6582466127","text":"import socket\n\nserverPort = 1200\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nserverSocket.bind(('', serverPort))\n\nsocketState = True\nprint(\"The server is ready to receive\")\n\nwhile socketState:\n\n    print(\"---- WAITING... ----\")\n    message, clientAddress = serverSocket.recvfrom(2048)\n\n    print(f\"RECEIVED FROM: {clientAddress}\")\n    print(f\"RECEIVED DATA: {message}\")\n    message = message.decode()\n\n    if message == 'close':\n        print(\"---- BYE ----\")\n        socketState = False\n        serverSocket.close()\n    else:\n        modifiedMessage = message.upper()\n        serverSocket.sendto(modifiedMessage.encode(), clientAddress)\n        print(\"---- SENT ----\")\n\nprint(\"---- CLOSED SUCCESSFULLY ----\")\n","repo_name":"adel-elmala/networking","sub_path":"python-socket-programming-assignments/chap2Tests/UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"15845400422","text":"#!/usr/bin/python\n\nimport os\n\nclass CoreExcitation(object):\n\n    prefix = './'\n\n    def __init__(self, atoms, element, calc=None, directory='./'):\n        self.atoms = atoms\n        self.element = element\n        self.calc = calc\n        self.directory = directory\n\n    # Move through each atom directory and change the index element to X\n    # and create the input files for that directory\n    def move_hole(self, element, system):\n        self._create_subdirectories()\n        for idx in self.idx:\n            if self.atoms.symbols[idx - 1] == 'X':\n                self.atoms.symbols[idx - 1] = self.element\n                self.atoms.symbols[idx] = 'X'\n            self._create_input(idx)\n        for idx in self.idx:\n            self._change_element(idx, element, system)\n\n    # Create individual subdirectories for each atom of chosen element\n    def _create_subdirectories(self):\n        self._find_all_elements()\n        for idx in self.idx:\n            os.makedirs(self.prefix + self.element + str(idx))\n\n    # Get the index number for all atoms of the chosen element\n    def _find_all_elements(self):\n        self.idx = []\n        i = 0\n        for elem in self.atoms.symbols:\n            if elem == self.element:\n                self.idx.append(i)\n            i += 1\n\n    # Use the ASE CASTEP calculator to write the .cell and .param input files\n    def _create_input(self, idx):\n        directory = self.prefix + self.element + str(idx)\n        self.calc._directory = directory\n        self.atoms.set_calculator(self.calc)\n        self.calc.prepare_input_files()\n\n    # Go into all the cell files and change the instances of element X\n    # to the correct chosen excited element\n    def _change_element(self, idx, element, system):\n        search = 'X '\n        replace = element + ':exc '\n        with open(self.prefix + element + str(idx) + '/' + system + '.cell', 'r') as file:\n            text = file.read()\n        text = text.replace(search, replace)\n        with open(self.prefix + element + str(idx) + '/' + system + '.cell', 'w') as file:\n            file.write(text)\n\n\nclass NEXAFS(CoreExcitation):\n\n    prefix = 'NEXAFS/'\n\n    def __init__(self, atoms, element, pspots, calc=None, directory='./'):\n        super(NEXAFS, self).__init__(atoms, element, calc)\n        # Set the NEXAFS specific keywords in the calculator\n        self.calc.param.task = 'ELNES'\n        self.calc.param.charge = 0.5\n        # Set the half core-hole pseudopotential string for the chosen element\n        self.calc.species_pot = [('{}:exc'.format(element), pspots)]\n\n\nclass XPS(CoreExcitation):\n\n    prefix = 'XPS/'\n\n    def __init__(self, atoms, element, pspots, calc=None, directory='./'):\n        super(XPS, self).__init__(atoms, element, calc)\n        # Set the XPS specific keywords in the calculator\n        self.calc.param.task = 'SINGLEPOINT'\n        self.calc.param.charge = 1.0\n        # Set the full core-hole pseudopotential string for the chosen element\n        self.calc.species_pot = [('{}:exc'.format(element), pspots)]
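\n\n\n# Illustrative usage (the structure file, element and pseudopotential string\n# below are placeholders, not part of this module; move_hole walks a\n# core-hole marker 'X' through the chosen element's sites as above):\n#\n#     from ase.io import read\n#     from ase.calculators.castep import Castep\n#     atoms = read('system.cell')\n#     xps = XPS(atoms, 'C', 'C_pspot.usp', calc=Castep())\n#     xps.move_hole('C', 'system')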
\n","repo_name":"maurergroup/core-level-spec-tools","sub_path":"CASTEP/core_excitation.py","file_name":"core_excitation.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
 +{"seq_id":"31775860185","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom HyperColomn2D import HyperColumn\n\n\ndef Background(x_, y_):\n    A = (x_-1.3)*0.5 + (y_-0.7)*0.8\n    return A\n\n\ndef AmplitudesOfGratings(x_, y_, sigma=0.5, cent_x=0.2, cent_y=0.2):\n    A = np.exp(-0.5*((x_ - cent_x)/sigma)**2 - 0.5*((y_ - cent_y)/sigma)**2)\n    return A
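\n\n\n# Gratings below renders a windowed plane wave: along the rotated axis\n# x' = x*cos(direction) - y*sin(direction) the image oscillates as\n# cos(2*pi*freq*x' + phi0), scaled by the Gaussian envelope above and offset\n# by Background.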
\ndef Gratings(freq, xx, yy, sigma=0.5, cent_x=0.2, cent_y=0.2, direction=2.3, phi0=0):\n    xx_rot = xx * np.cos(direction) - yy * np.sin(direction)\n    #yy_rot = xx * np.sin(direction) + yy * np.cos(direction)\n    image = np.cos(2 * np.pi * xx_rot * freq + phi0) * AmplitudesOfGratings(xx, yy, sigma, cent_x, cent_y) + Background(xx, yy)\n    return image\n\n\nimage_shift_x = 0  # 0.2\nimage_shift_y = 0  # 0.2\nNx = 200\nNy = 200\n\nyy, xx = np.meshgrid(np.linspace(-1, 1, Ny), np.linspace(-1, 1, Nx))\n\nsgmGauss = 0.1\nr = np.sqrt(image_shift_x**2 + image_shift_y**2)\ndelta_x = xx[1, 0] - xx[0, 0]\ndelta_y = yy[0, 1] - yy[0, 0]\nfreq_teor_max = 0.5 / (np.sqrt(delta_x**2 + delta_y**2))\nsigma_teor_min = 1 / (2 * np.pi * freq_teor_max)\nnsigmas = 4\nsigminimum = sigma_teor_min + 0.01 * r  # + 0.2*r\nsigmaximum = 100 * sigminimum\nsigmas = np.geomspace(sigminimum, sigmaximum, nsigmas)\ndirections = np.linspace(-np.pi, np.pi, 32, endpoint=False)\nfrequencies = np.asarray([5, ])\nhc = HyperColumn(image_shift_x, image_shift_y, xx, yy, directions, sigmas, sgmGauss, frequencies=frequencies, params={})\n\n\n# Freq = 8\n# Direction = np.pi/5  # np.pi / 2\n# ph_0 = 0\nSgm = 0.1\n\nfor iter_test in range(10):\n\n    real_encoded = {'peak_freq': np.random.uniform(1.0, 15.0),  # 8,\n                    'phase_0': np.random.uniform(-np.pi, np.pi),  # 1.5,\n                    'dominant_direction': np.random.uniform(0, np.pi),  # 0.25*np.pi,\n                    }\n\n    Freq = real_encoded['peak_freq']\n    Direction = real_encoded['dominant_direction']\n    phase_0 = real_encoded['phase_0']\n\n    image = Gratings(Freq, xx, yy, sigma=Sgm, cent_x=image_shift_x, cent_y=image_shift_y, direction=Direction, phi0=phase_0)\n\n    #image += np.random.normal(0, 0.2, image.shape)  # !!!!\n\n    calc_encoded = hc.encode(image)\n\n    for key, val in real_encoded.items():\n        print(key, val, calc_encoded[0][key])\n\n    print(\"#######################################################\")\n\n    # plt.imshow(image, cmap='gray')\n    # plt.show()","repo_name":"ivanmysin/Vision","sub_path":"SimpleTests.py","file_name":"SimpleTests.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
 +{"seq_id":"72903037583","text":"from past.utils import old_div\nfrom builtins import range\nfrom EMAN2 import *\nfrom math import *\nfrom os import remove\nimport time\nimport sys\n\ndef main():\n\tprogname = os.path.basename(sys.argv[0])\n\tusage = \"\"\"prog [options]\n\n\tThis program is used to produce reference-free class averages from a population of mixed,\n\tunaligned particle images. These averages can be used to generate initial models or assess\n\tthe structural variability of the data. They are not normally themselves used as part of\n\tthe single particle reconstruction refinement process, which uses the raw particles in a\n\treference-based classification approach. 
However, with a good structure, projections of\n\tthe final 3-D model should be consistent with the results of this reference-free analysis.\n\n\tThis variant of the program uses rotational/translational invariants derived from the bispectrum\n\tof each particle.\"\"\"\n\tparser = EMArgumentParser(usage=usage,version=EMANVERSION)\n\n\t# we grab all relevant options from e2refine.py for consistency\n\t# and snag a bunch of related code from David\n\n\t#options associated with e2refine2d.py\n\tparser.add_argument(\"--path\",type=str,default=None,help=\"Path for the refinement, default=auto\")\n\tparser.add_argument(\"--input\", default=None,type=str, help=\"The name of the file containing the particle data\", browser='EMSetsTable(withmodal=True,multiselect=False)', guitype='filebox', row=0, col=0, rowspan=1, colspan=3, mode=\"spr\")\n\tparser.add_argument(\"--ncls\", default=32, type=int, help=\"Number of classes to generate\", guitype='intbox', row=1, col=0, rowspan=1, colspan=1, mode=\"spr\")\n\tparser.add_argument(\"--alignsort\", default=False, action=\"store_true\",help=\"This will align and sort the final class-averages based on mutual similarity.\", guitype='boolbox', row=1, col=1, rowspan=1, colspan=1, mode=\"spr[True]\")\n\tparser.add_argument(\"--msamode\",default=\"pca\",type=str,help=\"e2msa can use a variety of different dimensionality reduction algorithms, the default is Principal Component Analysis (PCA), but others are available, see e2msa.py\")\n\tparser.add_argument(\"--basisrefs\",default=None,type=str,help=\"Will use a set of existing class-averages/projections to generate the Eigenbasis for classification. This must be an image stack with the same dimensions as the particle data.\")\n\tparser.add_argument(\"--normproj\", default=False, action=\"store_true\",help=\"Normalizes each projected vector into the MSA subspace. Note that this is different from normalizing the input images since the subspace is not expected to fully span the image\", guitype='boolbox', row=1, col=2, rowspan=1, colspan=1, mode=\"spr[True]\")\n#\tparser.add_argument(\"--fastseed\", action=\"store_true\", default=False,help=\"Will seed the k-means loop quickly, but may produce less consistent results. Always use this when generating >~100 classes.\",guitype='boolbox', row=1, col=2, rowspan=1, colspan=1, mode=\"spr[True]\")\n\tparser.add_argument(\"--outlierclass\", action=\"store_true\", default=False,help=\"Will turn the last class into a special class to collect outliers regardless of location\",guitype='boolbox', row=1, col=2, rowspan=1, colspan=1, mode=\"spr[False]\")\n\tparser.add_argument(\"--iter\", type=int, default=0, help = \"The total number of refinement iterations to perform\") #, guitype='intbox', row=2, col=0, rowspan=1, colspan=1, mode=\"spr\")\n\tparser.add_argument(\"--nbasisfp\",type=int,default=8,help=\"Number of MSA basis vectors to use when classifying particles\", guitype='intbox', row=2, col=1, rowspan=1, colspan=1, mode=\"spr\")\n#\tparser.add_argument(\"--automask\",default=False, action=\"store_true\",help=\"Automasking during class-averaging to help with centering when particle density is high\",guitype=\"boolbox\", row=2,col=2,rowspan=1,colspan=1,mode=\"spr\")\n#\tparser.add_argument(\"--naliref\", default=5, type=int, help=\"Number of alignment references to when determining particle orientations\", guitype='intbox', row=3, col=0, rowspan=1, colspan=1, mode=\"spr\")\n\tparser.add_argument(\"--parallel\",\"-P\",type=str,help=\"Run in parallel, specify type: