diff --git "a/1497.jsonl" "b/1497.jsonl" new file mode 100644--- /dev/null +++ "b/1497.jsonl" @@ -0,0 +1,234 @@ +{"seq_id": "13364058719", "text": "from abc import ABC\nfrom typing import Callable, Dict, Any, Awaitable\n\nfrom aiogram import Bot, Dispatcher, BaseMiddleware\n\nfrom aiogram.types import Message, CallbackQuery\n\n\nclass UtilsMiddleware(BaseMiddleware, ABC):\n\n def __init__(self, bot: Bot, dp: Dispatcher):\n super().__init__()\n self.bot = bot\n self.dp = dp\n\n async def on_pre_process_message(self, message: Message, data: dict):\n data[\"bot\"] = self.bot\n data['dp'] = self.dp\n\n async def __call__(\n self,\n handler: Callable[[Message, Dict[str, Any]], Awaitable[Any]],\n event: Message,\n data: Dict[str, Any]\n ):\n data['dp'] = self.dp\n return await handler(event, data)\n\n\nclass WorkersCallbackMiddleware(BaseMiddleware, ABC):\n\n async def __call__(\n self,\n handler: Callable[[CallbackQuery, Dict[str, Any]], Awaitable[Any]],\n event: CallbackQuery,\n data: Dict[str, Any]\n ):\n data[\"worker_id\"] = event.data.split('_')[-1]\n return await handler(event, data)\n", "repo_name": "devtolmachev/Better", "sub_path": "utils/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 1081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "aiogram.BaseMiddleware", "line_number": 9, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 9, "usage_type": "name"}, {"api_name": "aiogram.Bot", "line_number": 11, "usage_type": "name"}, {"api_name": "aiogram.Dispatcher", "line_number": 11, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 22, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Awaitable", "line_number": 22, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 24, "usage_type": "name"}, {"api_name": "aiogram.BaseMiddleware", "line_number": 30, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 34, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Awaitable", "line_number": 34, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "41825001153", "text": "from typing import Union\r\n\r\nfrom fastapi import FastAPI\r\n\r\nfrom selenium import webdriver\r\n\r\nimport time\r\n\r\napp = FastAPI()\r\n\r\n\r\n@app.get(\"/\")\r\ndef read_root():\r\n return {\"Hello\": \"World\",\r\n \"Amber\": \"!!!\"}\r\n\r\n\r\n@app.get(\"/items/{item_id}\")\r\ndef read_item(item_id: int, q: Union[str, None] = None):\r\n return {\"item_id\": item_id, \"q\": q}\r\n\r\n\r\n@app.get(\"/selenium/{item_id}\")\r\ndef 
selenium(item_id: int):\r\n try:\r\n browser = webdriver.Remote(\r\n command_executor='http://xxx.xxx.xx.xx:14444/wd/hub',\r\n options=webdriver.ChromeOptions()\r\n )\r\n browser.get('https://www.google.com')\r\n browser.save_screenshot(\"./app/chrome.png\")\r\n print(browser.title)\r\n\r\n\r\n except Exception as error:\r\n print(error)\r\n finally:\r\n time.sleep(30)\r\n browser.quit()\r\n\r\n\r\n return {\"item_id\": item_id}\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "draguitar/Fastapi-SeleniumGrid", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.Remote", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "17007459273", "text": "import streamlit as st\r\nimport pandas as pd\r\nimport altair as alt\r\n\r\nst.title(\"Gender equality\")\r\n\r\n\r\nst.header(\"Gender equality\")\r\n\r\ndf = pd.read_csv(\"genderTransformed.csv\", sep=',', header=0)\r\n\r\n\r\n# a lot of missing data so will start from 1990\r\nis_selected = df['Year']>=1990\r\ndf = df[is_selected]\r\n\r\n \r\ndf = df.sort_values(by=['Country Name'])\r\ncountries = df['Country Name'].unique()\r\n\r\n\r\ncountry3 = st.selectbox('Choose country',countries)\r\nindicators = ['Gender Gap Labor force participation % (15+age)','Gender Gap Part time employment %','Gender Gap Self-employed %','Gender Gap Unemployment %','Gender Gap Vulnerable employment %','Gender Gap Wage and salaried workers %']\r\nindicator = st.selectbox('Choose Indicator',indicators)\r\n\r\n\r\n\r\nif st.button(\"Show chart 1\"):\r\n is_selected = df['Country Name']==country3\r\n df_selected = df[is_selected]\r\n df1 = df_selected[['Year', indicator]]\r\n \r\n y = indicator.split(\"%\")\r\n \r\n # lets try to deceive by increasing the domain of Y axis, so the gap looks smaller and changes smoother throught the years\r\n chart3_deceive = alt.Chart(df1).mark_area().encode(\r\n alt.X('Year:N'),\r\n\talt.Y(indicator+\":Q\",scale=alt.Scale(domain=(-100, 100))),\r\n ).properties(\r\n title={\r\n \"text\": [indicator + \" for \"+ country3]\r\n }\r\n )\r\n chart3_deceive.encoding.y.title = y[0]\r\n st.altair_chart(chart3_deceive)\r\n\r\n\r\n\r\ncountry = st.selectbox('Country',countries)\r\nif st.button(\"Show chart 3\"):\r\n is_selected = df['Country Name']==country\r\n df_selected = df[is_selected]\r\n\r\n base = alt.Chart(df_selected.reset_index()).encode(x='Year').properties(\r\n title='Gender Equality indicators for '+country\r\n )\r\n \r\n chart = alt.layer(\r\n base.mark_line(color='red').encode(y='Gender Gap Part time employment %'),\r\n base.mark_line(color='orange').encode(y='Gender Gap Self-employed %'),\r\n base.mark_line(color='green').encode(y='Gender Gap Unemployment %'),\r\n base.mark_line(color='purple').encode(y='Gender Gap Vulnerable employment %'),\r\n base.mark_line(color='pink').encode(y='Gender Gap Wage and salaried workers %')\r\n )\r\n chart.layer[0].encoding.y.title = 'Gender Gap'\r\n 
st.write(chart)\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n", "repo_name": "LediaIsaj/VisualAnalytics_gender_equality", "sub_path": "assignment4_deceive.py", "file_name": "assignment4_deceive.py", "file_ext": "py", "file_size_in_byte": 2265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.title", "line_number": 5, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 28, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 36, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 37, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 38, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 50, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 54, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 58, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "23392782341", "text": "from math import sqrt, ceil, floor\nfrom sys import argv\nfrom lib.File import FileParser\n\ndef palindrome(argi):\n args = str(argi)\n return args == args[::-1]\n\ndef case_logic(case_args):\n a, b = map(int, case_args[0].split())\n base_a = int(ceil(sqrt(a)))\n base_b = int(floor(sqrt(b)))\n found = 0\n for i in range(base_a, base_b+1):\n if palindrome(i) and palindrome(i * i):\n found += 1\n return str(found)\n\ndef main(args):\n parser = FileParser(1, 1, args[0])\n problem = parser.parse_problem()\n problem.set_case_logic(case_logic)\n problem.solve()\n problem.done()\n\nif __name__ == \"__main__\":\n main(argv[1:])\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_118/1790.py", "file_name": "1790.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "math.ceil", "line_number": 11, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 11, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 12, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "lib.File.FileParser", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "12970431148", "text": "import subprocess\nimport time\nimport os\nfrom datetime import datetime\n\nlast_trimmed = 0\n\ndef trimmer():\n global last_trimmed\n nodine_log_path = \"/home/inery-genesis/inery.setup/master.node/blockchain/nodine.log\"\n log_storage = \"/log_storage\"\n time_created = time.time()\n oldest_file = \"\"\n return_nodine_size = subprocess.Popen([\"sudo\", \"du\", nodine_log_path], stdout=subprocess.PIPE)\n size_text = return_nodine_size.communicate()[0].decode()\n size_f = size_text.split(\"\\t\")[0]\n subprocess.run([\"echo\", \"Size of file \", str(size_f), \"KB\\n\", \"Last trimmed \", str(last_trimmed), 
\" / \", datetime.fromtimestamp(last_trimmed).strftime(\"%A, %B %d, %Y %I:%M:%S\")])\n if (float(size_f) > 210000000) or ((time.time() - last_trimmed) > 432000):\n for root, dirs, files in os.walk(log_storage):\n if len(files) >= 5:\n for file in files:\n if file.endswith(\".zip\"):\n if os.path.getctime(os.path.join(root, file)) < time_created:\n oldest_file = os.path.join(root, file)\n time_created = os.path.getctime(os.path.join(root, file))\n if oldest_file != \"\":\n subprocess.run([\"sudo\", \"rm\", \"-rf\", oldest_file])\n oldest_file = \"\"\n subprocess.run([\"sudo\", \"zip\", \"-r\", log_storage + \"/nodinelog\" + str(time.time()) + \".zip\", nodine_log_path])\n subprocess.run([\"echo\", \"Backuped nodine.log file \", str(datetime.now())])\n subprocess.run([\"truncate\", \"-s\", \"0\", nodine_log_path])\n last_trimmed = time.time()\n\nif __name__ == \"__main__\":\n while(True):\n try:\n trimmer()\n except Exception as e:\n print(e)\n pass\n time.sleep(3600)\n", "repo_name": "vanja032/LogCleaner", "sub_path": "LogTrimmer.py", "file_name": "LogTrimmer.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.getctime", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.getctime", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "30085365153", "text": "from django.contrib import messages\nfrom django.shortcuts import redirect, render\nimport bcrypt\nfrom .decorators import login_required\nfrom .models import *\n\n@login_required\ndef index(request):\n current_user = request.session['user']['id']\n owner = Trip.objects.all().filter(manager=current_user)\n travelers = Trip.objects.all().filter(traveler=current_user).exclude(manager=current_user)\n trips = 
Trip.objects.all().exclude(traveler=current_user).exclude(manager=current_user)\n context = {\n \"owners\" : owner,\n \"travelers\": travelers,\n \"trips\" : trips\n }\n return render(request, 'travel_section.html', context)\n\n@login_required\ndef add(request):\n\n return render(request, 'add.html')\n\ndef add_trip(request):\n try:\n datetime.datetime.strptime(request.POST['start_date'], '%Y-%m-%d')\n except:\n messages.error(request, \"ingrese una fecha\")\n return redirect(\"/add\")\n try:\n datetime.datetime.strptime(request.POST['end_date'], '%Y-%m-%d')\n except:\n messages.error(request, \"ingrese una fecha\")\n return redirect(\"/add\")\n errors = Trip.objects.validador_basico(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(\"/add\")\n \n current_user = request.session['user']['id']\n in_manager = User.objects.get(id=current_user)\n # in_traveler = User.objects.get(id=current_user)\n in_destination=request.POST['destination']\n in_description=request.POST['description']\n in_start_date=request.POST['start_date']\n in_end_date=request.POST['end_date']\n new_trip = Trip.objects.create(manager=in_manager,\n # traveler=in_traveler,\n destination=in_destination,\n description=in_description,\n start_date=in_start_date,\n end_date=in_end_date)\n \n new_trip.traveler.add(in_manager)\n\n return redirect(\"/travels\")\n\n# author.books.remove(book)\n# INSTANCIA_TRIP . VARIABLE OBJETIVO . COMANDO ( INSTANCIA_USER )\n\ndef cancel_trip(request, trip_id):\n current_user_id = request.session['user']['id']\n current_user = User.objects.get(id=current_user_id)\n trip = Trip.objects.get(id=int(trip_id))\n trip.traveler.remove(current_user)\n return redirect(\"/travels\")\n\ndef delete_trip(request, trip_id):\n Trip.objects.filter(id=int(trip_id)).delete()\n return redirect(\"/travels\")\n\ndef join_trip(request, trip_id):\n current_user_id = request.session['user']['id']\n current_user = User.objects.get(id=current_user_id)\n trip = Trip.objects.get(id=int(trip_id))\n trip.traveler.add(current_user)\n return redirect(\"/travels\")\n\ndef view_trip(request, trip_id):\n trip = Trip.objects.get(id=int(trip_id))\n manager = User.objects.filter(managers = trip)\n travelers = User.objects.filter(travelers = trip).exclude(managers = trip)\n\n context = {\n \"trip\":trip,\n \"manager\":manager,\n \"travelers\":travelers\n }\n\n return render(request, 'view.html', context)", "repo_name": "RLAlfaro/travel_buddy", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "decorators.login_required", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "decorators.login_required", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, 
"usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "24285017621", "text": "# For all preprocessing methods applied on a feature, $D_{*j}$, in $D$, what is the change in feature spread.\n# Input: D - dataframe\n\nimport pymongo\nimport pandas as pd\nimport pprint\nimport time\nimport sys\nimport random\n\ndef get_random_feature():\n\t# Get output_entities collection\n\toutput_entities = db['output_entities']\n\n\t# Get random document on output_entities collection\n\trandom_ent = list(output_entities.aggregate([{'$sample': {'size': 1}}]))\n\t\n\t# Get feature_name\n\tfeature_name = random_ent[0]['attributes']['feature_name']\n\t#feature_name = 'checking'\n\n\treturn feature_name\n\ndef get_dataset_spread(invalid_items, new_items):\n\t# Get all preprocessing methods id:\n\tact_new = [e['_id'] for e in new_items]\n\tact_invalid = [e['_id'] for e in invalid_items]\n\tacts_id = list(set(act_new + act_invalid))\n\n\t# Iterate all preprocessing methods related to feature $D_{*j}$:\n\tfor i in acts_id:\n\t\t# Get the activity identifier:\n\t\tact_identifier = i\n\t\t# Get the related invalidated entities id:\n\t\tinvalidated_entities = [e['invalidated_entities'] for e in invalid_items if e['_id'] == act_identifier]\n\n\t\tif invalidated_entities:\n\t\t\tinvalidated_entities = invalidated_entities[0]\n\t\t\t# Get the values of the invalidated entities\n\t\t\tinvalid_values = entities.aggregate([ \\\n\t\t\t\t{'$match': {'identifier':{'$in':invalidated_entities}}}, \\\n\t\t\t\t{'$project': {'invalid_values': '$attributes.value', '_id': 0}} \\\n\t\t\t])\n\t\t\tinvalid_values = list(invalid_values)\n\t\t\tinvalid_values = [d['invalid_values'] for d in invalid_values]\n\n\t\t# Get the related new entities id:\n\t\tnew_entities = [e['new_entities'] for e in new_items if e['_id'] == act_identifier]\n\n\t\tif new_entities:\n\t\t\tnew_entities = new_entities[0]\n\t\t\t# Get the values of the new entities\n\t\t\tnew_values = entities.aggregate([ \\\n\t\t\t\t{'$match': {'identifier':{'$in':new_entities}}}, \\\n\t\t\t\t{'$project': {'new_values': '$attributes.value', '_id': 0}} \\\n\t\t\t])\n\t\t\tnew_values = list(new_values)\n\t\t\tnew_values = [d['new_values'] for d in new_values]\n\n\t\tprint('------------------------------------------')\n\t\tprint('Activity identifier: ' + str(act_identifier))\n\t\tprint('Number of invalidated items: ' + str(len(invalidated_entities)))\n\t\tprint('Number of new items: ' + str(len(new_entities)))\n\t\tif invalidated_entities:\n\t\t\tprint('Max invalid_values ' + max(invalid_values))\n\t\tif new_entities:\n\t\t\tprint('Max new_values ' + max(new_values))\n\n\ndef get_items(feature_name):\n\t# Get activities relatetd to feature_name\n\tacts = activities.find({'attributes.features_name': {'$regex': '.*' + feature_name + '*.'}}, {'identifier': 1, '_id': 0}).distinct('identifier')\n\n\t# Get invalidated items for all preprocessing 
methods related to feature $D_{*j}$:\n\tinvalid_items = relations.aggregate([ \\\n\t\t{'$match': {'prov:relation_type': 'wasInvalidatedBy', 'prov:activity': {'$in': acts}}}, \\\n\t\t{'$group': {'_id': '$prov:activity', 'invalidated_entities': {'$addToSet': '$prov:entity'}}} \n\t])\n\n\t# Get new items for all preprocessing methods related to feature $D_{*j}$:\n\tnew_items = relations.aggregate([ \\\n\t\t{'$match': {'prov:relation_type': 'wasGeneratedBy', 'prov:activity': {'$in': acts}}}, \\\n\t\t{'$group': {'_id': '$prov:activity', 'new_entities': {'$addToSet': '$prov:entity'}}} \n\t])\n\n\treturn (invalid_items, new_items)\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) == 2 :\n\t\tdbname = sys.argv[1]\n\n\t\t# Connect with MongoClient on the default host and port:\n\t\tclient = pymongo.MongoClient('localhost', 27017)\n\n\t\t# Getting a Database:\n\t\tdb = client[dbname]\n\n\t\t# Get entities, activities and relations mongodb collection:\n\t\tentities = db.entities\n\t\tactivities = db.activities\n\t\trelations = db.relations\n\n\t\tfeature_name = get_random_feature()\n\n\t\tprint('Feature Spread of: ' + feature_name)\n\n\t\ttime1 = time.time()\n\t\tinvalid_items, new_items = get_items(feature_name)\n\t\tinvalid_items = list(invalid_items)\n\t\tnew_items = list(new_items)\n\n\t\tif ((invalid_items) or (new_items)):\n\t\t\tget_dataset_spread(invalid_items, new_items)\n\t\telse:\n\t\t\tprint('No operation on '+ feature_name )\n\n\t\ttime2 = time.time()\n\n\t\ttext = '{:s} function took {:.3f} sec.'.format('Feature Spread', (time2-time1))\n\t\tprint(text)\n\t\t\n\t\t# Close Mongodb connection:\n\t\tclient.close()\n\telse:\n\t\tprint('[ERROR] usage: feture_spread.py ')", "repo_name": "GiuliaSim/DataProvenance", "sub_path": "queries/feature_spread.py", "file_name": "feature_spread.py", "file_ext": "py", "file_size_in_byte": 4218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "18052222995", "text": "from django import forms\n\nfrom django_summernote.widgets import SummernoteWidget\n\nfrom journal import models\n\nclass SubmissionForm(forms.ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SubmissionForm, self).__init__(*args, **kwargs)\n\t\tself.fields['subtitle'].required = False\n\t\tself.fields['manuscript_file'].required = False\n\n\tdef save(self, commit=True, request=None):\n\t\tarticle = super(SubmissionForm, self).save(commit=False)\n\t\tarticle.owner = request.user\n\n\t\tif commit:\n\t\t\tarticle.save()\n\n\t\treturn article\n\n\n\tclass Meta:\n\t\tmodel = models.Article\n\t\texclude = ('owner', 'authors', 'date_submitted', 'date_published', 'open_for_comments', 'doi')\n\t\twidgets = {\n\t\t\t'abstract': SummernoteWidget(),\n\t\t}\n\n\n", "repo_name": "ajrbyers/panacea", "sub_path": "src/journal/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, 
"usage_type": "name"}, {"api_name": "journal.models.Article", "line_number": 25, "usage_type": "attribute"}, {"api_name": "journal.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django_summernote.widgets.SummernoteWidget", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "23599048851", "text": "#!/usr/bin/env python\n\nimport sys, math\nimport fractions\nfrom itertools import repeat, count, cycle, ifilter, ifilterfalse, imap, starmap, tee, izip, product, combinations, permutations\nfrom collections import defaultdict\nfrom operator import itemgetter\n\n\ndef mapInstance( foo, istream ):\n N = int( istream.readline() )\n idata = []\n for i in xrange(N):\n idata.append( map( int, istream.readline().strip() )[::-1] )\n return foo( idata )\n \ndef mapInput( foo, preproc = None, istream = sys.stdin, ostream = sys.stdout ):\n N = map( int, istream.readline().split() )[0]\n if preproc:\n for i in xrange(D):\n preproc( istream.readline().split() )\n odata = starmap( mapInstance, repeat( ( foo, istream ), N ) )\n for i, d in enumerate( odata ):\n print >>sys.stderr, \"Case #%d\" % ( i+1 )\n print >>ostream, \"Case #%d: %s\" % ( i+1, d )\n \nclass showfunction:\n def __init__( self, foo ):\n self.foo = foo\n \n def __call__( self, *args ):\n result = self.foo( *args )\n print >>sys.stderr, args, result\n return result\n\nclass cachedfunction:\n def __init__( self, foo ):\n self.foo = foo\n self.cache = {}\n \n def __call__( self, *args ):\n if args in self.cache:\n return self.cache[args]\n else:\n result = self.cache[args] = self.foo( *args )\n return result\n\ndef solve( idata ):\n idata = map( lambda x: x+[1], idata )\n rows = map( lambda x: x.index(1), idata )\n \n #@cachedfunction\n def rek(n, visited):\n if n == 0:\n return 0\n \n swap = -1\n mini = 10000\n for i in range(len(rows)):\n if i in visited: continue\n swap += 1\n if rows[i] < n: continue\n this = swap + rek(n-1, visited | set([i]))\n if this < mini:\n mini = this\n return mini\n \n return str( rek(len(rows)-1, set()) )\n \ndef main( args ):\n mapInput( solve )\n\nif __name__ == \"__main__\":\n main( sys.argv )\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_46/104.py", "file_name": "104.py", "file_ext": "py", "file_size_in_byte": 1877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.stdin", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 17, "usage_type": "attribute"}, {"api_name": "itertools.starmap", "line_number": 22, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "74179538434", "text": "from typing import Callable\n\nfrom hypothesis import given\n\nfrom lz.logical import disjoin\nfrom tests import strategies\nfrom tests.hints import Domain\n\n\n@given(strategies.predicates, strategies.predicates_arguments)\ndef test_idempotency(predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n self_disjunction = disjoin(predicate)\n\n result = self_disjunction(predicate_argument)\n\n assert result is predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.true_predicates,\n strategies.predicates_arguments)\ndef 
test_absorbing_element(predicate: Callable[[Domain], bool],\n true_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(predicate, true_predicate)\n right_disjunction = disjoin(true_predicate, predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result is true_predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.false_predicates,\n strategies.predicates_arguments)\ndef test_neutral_element(predicate: Callable[[Domain], bool],\n false_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(predicate, false_predicate)\n right_disjunction = disjoin(false_predicate, predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result is predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.predicates,\n strategies.predicates_arguments)\ndef test_commutativity(left_predicate: Callable[[Domain], bool],\n right_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(left_predicate, right_predicate)\n right_disjunction = disjoin(right_predicate, left_predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result\n\n\n@given(strategies.predicates, strategies.predicates, strategies.predicates,\n strategies.predicates_arguments)\ndef test_associativity(left_predicate: Callable[[Domain], bool],\n mid_predicate: Callable[[Domain], bool],\n right_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n first_disjunction = disjoin(disjoin(left_predicate, mid_predicate),\n right_predicate)\n second_disjunction = disjoin(left_predicate, disjoin(mid_predicate,\n right_predicate))\n\n assert (first_disjunction(predicate_argument)\n is second_disjunction(predicate_argument))\n", "repo_name": "lycantropos/lz", "sub_path": "tests/logical_tests/test_disjoin.py", "file_name": "test_disjoin.py", "file_ext": "py", "file_size_in_byte": 2994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Callable", "line_number": 11, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 11, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 12, "usage_type": "name"}, {"api_name": "lz.logical.disjoin", "line_number": 13, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.strategies.predicates", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 10, "usage_type": "name"}, {"api_name": "tests.strategies.predicates_arguments", "line_number": 10, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 22, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 23, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 23, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 24, "usage_type": "name"}, {"api_name": "lz.logical.disjoin", "line_number": 25, "usage_type": "call"}, {"api_name": "lz.logical.disjoin", "line_number": 26, "usage_type": "call"}, 
{"api_name": "hypothesis.given", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.strategies.predicates", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 20, "usage_type": "name"}, {"api_name": "tests.strategies.true_predicates", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tests.strategies.predicates_arguments", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 36, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 37, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 37, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 38, "usage_type": "name"}, {"api_name": "lz.logical.disjoin", "line_number": 39, "usage_type": "call"}, {"api_name": "lz.logical.disjoin", "line_number": 40, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.strategies.predicates", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 34, "usage_type": "name"}, {"api_name": "tests.strategies.false_predicates", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tests.strategies.predicates_arguments", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 50, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 51, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 51, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 52, "usage_type": "name"}, {"api_name": "lz.logical.disjoin", "line_number": 53, "usage_type": "call"}, {"api_name": "lz.logical.disjoin", "line_number": 54, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 48, "usage_type": "call"}, {"api_name": "tests.strategies.predicates", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 48, "usage_type": "name"}, {"api_name": "tests.strategies.predicates_arguments", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 64, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 65, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 66, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 66, "usage_type": "name"}, {"api_name": "tests.hints.Domain", "line_number": 67, "usage_type": "name"}, {"api_name": "lz.logical.disjoin", "line_number": 68, "usage_type": "call"}, {"api_name": "lz.logical.disjoin", "line_number": 70, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 62, "usage_type": "call"}, {"api_name": "tests.strategies.predicates", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tests.strategies", "line_number": 62, "usage_type": "name"}, {"api_name": "tests.strategies.predicates_arguments", "line_number": 63, "usage_type": "attribute"}, {"api_name": 
"tests.strategies", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "1549062369", "text": "\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nimport os\nimport utm\nimport rasterio\nfrom tqdm import tqdm\n#from xml.etree import ElementTree as et\nimport xmltodict\n\n##\ndef cloud_masking(image,cld):\n cloud_mask = cld > 30\n band_mean = image.mean()\n image[cloud_mask] = band_mean\n return image\n\n##\ndef load_file(fp):\n \"\"\"Takes a PosixPath object or string filepath\n and returns np array\"\"\"\n\n return np.array(Image.open(fp.__str__()))\n\ndef paths (name): \n\n fold_band_10 = glob(name+\"/GRANULE/*/IMG_DATA/R10m\")[0]\n fold_band_20 = glob(name+\"/GRANULE/*/IMG_DATA/R20m\")[0]\n fold_band_60 = glob(name+\"/GRANULE/*/IMG_DATA/R60m\")[0]\n path = name+\"/GRANULE/*/IMG_DATA/R10m\"+\"/*.jp2\"\n x = glob(path)\n lists = x[0].split(\"/\")[-1].split(\"_\")\n fixe = lists[0]+'_'+lists[1]\n\n band_10 = ['B02', 'B03', 'B04','B08']\n band_20 = ['B05', 'B06', 'B07','B8A','B11', 'B12']\n band_60 = ['B01','B09']\n images_name_10m = [fixe+\"_\"+band+\"_10m.jp2\" for band in band_10 ]\n images_name_20m = [fixe+\"_\"+band+\"_20m.jp2\" for band in band_20 ]\n images_name_60m = [fixe+\"_\"+band+\"_60m.jp2\" for band in band_60 ]\n #\n bandes_path_10 = [os.path.join(fold_band_10,img) for img in images_name_10m]\n bandes_path_20 = [os.path.join(fold_band_20,img) for img in images_name_20m]\n bandes_path_60 = [os.path.join(fold_band_60,img) for img in images_name_60m]\n #\n tile_path = name+\"/INSPIRE.xml\"\n path_cld_20 = glob(name+\"/GRANULE/*/QI_DATA/MSK_CLDPRB_20m.jp2\")[0]\n path_cld_60 = glob(name+\"/GRANULE/*/QI_DATA/MSK_CLDPRB_60m.jp2\")[0]\n\n return bandes_path_10,bandes_path_20,bandes_path_60,tile_path,path_cld_20,path_cld_60\n\n##\ndef coords_to_pixels(ref, utm, m=10):\n \"\"\" Convert UTM coordinates to pixel coordinates\"\"\"\n\n x = int((utm[0] - ref[0])/m)\n y = int((ref[1] - utm[1])/m)\n\n return x, y\n\n##\ndef extract_sub_image(bandes_path,tile_path,area,resolution=10, d= 3, cld_path = None):\n \n xml_file=open(tile_path,\"r\")\n xml_string=xml_file.read()\n python_dict=xmltodict.parse(xml_string)\n tile_coordonnates = python_dict[\"gmd:MD_Metadata\"][\"gmd:identificationInfo\"][\"gmd:MD_DataIdentification\"][\"gmd:abstract\"][\"gco:CharacterString\"].split()\n\n # S2 tile coordonnates\n lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])\n tile_coordonnate = [lat,lon]\n\n refx, refy, _, _ = utm.from_latlon(tile_coordonnate[0], tile_coordonnate[1])\n ax,ay,_,_ = utm.from_latlon(area[1],area[0]) # lat,lon\n \n ref = [refx, refy]\n utm_cord = [ax,ay]\n x,y = coords_to_pixels(ref,utm_cord,resolution)\n \n images = []\n # sub_image_extraction\n for band_path in tqdm(bandes_path, total=len(bandes_path)):\n image = load_file(band_path).astype(np.float32)\n if resolution==60:\n sub_image = image[y,x]\n images.append(sub_image)\n \n else:\n sub_image = image[y-d:y+d,x-d:x+d]\n images.append(sub_image)\n\n images = np.array(images)\n \n\n # verify if the study are is cloudy\n if cld_path is not None:\n cld_mask = load_file(cld_path).astype(np.float32)\n cld = cld_mask[y-d:y+d,x-d:x+d]\n # cloud removing\n images = cloud_masking(images,cld)\n\n if resolution==60:\n return images\n else:\n return images.mean((1,2))\n \n\ndef ndvi(area, tile_name):\n \"\"\"\n polygone: (lon,lat) format\n tile_name: name of tile with the most low cloud coverage\n \"\"\"\n #Extract tile coordonnates (lat,long)\n tile_path 
= tile_name+\"/INSPIRE.xml\"\n xml_file=open(tile_path,\"r\")\n xml_string=xml_file.read()\n python_dict=xmltodict.parse(xml_string)\n tile_coordonnates = python_dict[\"gmd:MD_Metadata\"][\"gmd:identificationInfo\"][\"gmd:MD_DataIdentification\"][\"gmd:abstract\"][\"gco:CharacterString\"].split()\n\n # S2 tile coordonnates\n lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])\n tile_coordonnate = [lat,lon]\n\n refx, refy, _, _ = utm.from_latlon(tile_coordonnate[0], tile_coordonnate[1])\n ax,ay,_,_ = utm.from_latlon(area[1],area[0]) # lat,lon\n \n ref = [refx, refy]\n utm_cord = [ax,ay]\n x,y = coords_to_pixels(ref,utm_cord)\n\n # read images\n path_4 = tile_name+\"/GRANULE/*/IMG_DATA/R10m/*_B04_10m.jp2\"\n path_8 = tile_name+\"/GRANULE/*/IMG_DATA/R10m/*_B08_10m.jp2\"\n red_object = rasterio.open(glob(path_4)[0])\n nir_object = rasterio.open(glob(path_8)[0])\n red = red_object.read()\n nir = nir_object.read()\n red,nir = red[0],nir[0]\n # extract area and remove unsigne\n sub_red = red[y-3:y+3,x-3:x+3].astype(np.float16)\n sub_nir = nir[y-3:y+3,x-3:x+3].astype(np.float16)\n \n # NDVI\n ndvi_image = ((sub_nir - sub_red)/(sub_nir+sub_red))\n ndvi_mean_value = ndvi_image.mean()\n \n return ndvi_mean_value\n ", "repo_name": "data354/Biomass", "sub_path": "processing.py", "file_name": "processing.py", "file_ext": "py", "file_size_in_byte": 4785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 31, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 32, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 50, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 51, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 69, "usage_type": "call"}, {"api_name": "utm.from_latlon", "line_number": 76, "usage_type": "call"}, {"api_name": "utm.from_latlon", "line_number": 77, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 100, "usage_type": "attribute"}, {"api_name": "xmltodict.parse", "line_number": 120, "usage_type": "call"}, {"api_name": "utm.from_latlon", "line_number": 127, "usage_type": "call"}, {"api_name": "utm.from_latlon", "line_number": 128, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 137, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 137, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 138, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 138, "usage_type": 
"call"}, {"api_name": "numpy.float16", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 144, "usage_type": "attribute"}]} +{"seq_id": "43258391862", "text": "import unicodedata\nimport re\nimport json\n\nimport nltk\nfrom nltk.tokenize.toktok import ToktokTokenizer\nfrom nltk.corpus import stopwords\n\nimport pandas as pd\n\ndef basic_clean(string):\n string = string.lower()\n string = (unicodedata.normalize('NFKD', string)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n )\n string = re.sub(r\"[^a-z0-9'\\s]\", '', string)\n return string\n\ndef tokenize(string):\n tokenizer = nltk.tokenize.ToktokTokenizer()\n return tokenizer.tokenize(string, return_str=True)\n\ndef stem(string):\n ps = nltk.porter.PorterStemmer()\n stems = [ps.stem(word) for word in string.split()]\n return ' '.join(stems)\n\ndef lemmatize(string):\n wnl = nltk.stem.WordNetLemmatizer()\n lemmas = [wnl.lemmatize(word) for word in string.split()]\n return ' '.join(lemmas)\n\ndef remove_stopwords(string, extra_words=[], exclude_words=[]):\n stopword_list = stopwords.words('english')\n \n for word in extra_words:\n stopword_list.append(word)\n \n for word in exclude_words:\n stopword_list.remove(word)\n \n words = string.split()\n filtered_words = [word for word in words if word not in stopword_list]\n return ' '.join(filtered_words)\n\ndef prepare_article_data(df, column):\n clean_tokens = df[column].apply(basic_clean).apply(tokenize)\n df['stemmed'] = clean_tokens.apply(stem)\n df['lemmatized'] = clean_tokens.apply(lemmatize)\n df['clean'] = clean_tokens.apply(remove_stopwords)\n return df", "repo_name": "RyanMcCall/natural-language-processing", "sub_path": "prepare.py", "file_name": "prepare.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "unicodedata.normalize", "line_number": 13, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.tokenize.ToktokTokenizer", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 21, "usage_type": "attribute"}, {"api_name": "nltk.porter.PorterStemmer", "line_number": 25, "usage_type": "call"}, {"api_name": "nltk.porter", "line_number": 25, "usage_type": "attribute"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 30, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 30, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 35, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "15879267959", "text": "import bpy\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom modules.mesh_test import SpecMeshTest, OperatorSpecEditMode, RunTest\n\n\ndef main():\n tests = [\n\n SpecMeshTest('Cubecube_intersect_union', 'Cubecube', 'Cubecube_result_1',\n [OperatorSpecEditMode('intersect_boolean',\n {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_intersect', 'Cubecube', 'Cubecube_result_2',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'INTERSECT', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_difference', 'Cubecube', 'Cubecube_result_3',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'DIFFERENCE', 'solver': 'FAST'}, 'FACE',\n {0, 1, 2, 3, 4, 5}, 
)]),\n SpecMeshTest('Cubecube_intersect_cut', 'Cubecube', 'Cubecube_result_4', [OperatorSpecEditMode('intersect',\n {'separate_mode': 'CUT', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_all', 'Cubecube', 'Cubecube_result_5',\n [OperatorSpecEditMode('intersect',\n {'separate_mode': 'ALL', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_none', 'Cubecube', 'Cubecube_result_6',\n [OperatorSpecEditMode('intersect',\n {'separate_mode': 'NONE', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_select_none', 'Cubecube',\n 'Cubecube_result_7',\n [OperatorSpecEditMode('intersect',\n {'mode': 'SELECT', 'separate_mode': 'NONE', 'solver': 'FAST'}, 'FACE',\n {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, )]),\n SpecMeshTest('Cubecone_intersect_union', 'Cubecone', 'Cubecone_result_1',\n [OperatorSpecEditMode('intersect_boolean',\n {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {6, 7, 8, 9, 10}, )]),\n SpecMeshTest('Cubecones_intersect_union', 'Cubecones', 'Cubecones_result_1',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n\n ]\n\n operator_test = RunTest(tests)\n\n command = list(sys.argv)\n for i, cmd in enumerate(command):\n if cmd == \"--run-all-tests\":\n operator_test.do_compare = True\n operator_test.run_all_tests()\n break\n elif cmd == \"--run-test\":\n name = command[i + 1]\n operator_test.do_compare = False\n operator_test.run_test(name)\n break\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "blender/blender", "sub_path": "tests/python/boolean_operator.py", "file_name": "boolean_operator.py", "file_ext": "py", "file_size_in_byte": 3089, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10105, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 12, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 13, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 15, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 16, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 17, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 20, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 20, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 22, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 23, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 25, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 26, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 28, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 30, "usage_type": "call"}, 
{"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 33, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 34, "usage_type": "call"}, {"api_name": "modules.mesh_test.SpecMeshTest", "line_number": 36, "usage_type": "call"}, {"api_name": "modules.mesh_test.OperatorSpecEditMode", "line_number": 37, "usage_type": "call"}, {"api_name": "modules.mesh_test.RunTest", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "203728739", "text": "import asyncio\nimport time\nurls = [\"www.baidu.com\", \"www.sogou.com\", \"www.goubanjia.com\"]\n\n# Cannot use sync models in async\nasync def makeRequest(url):\n print(\"Requesting: \", url)\n await asyncio.sleep(2)\n print(\"Succeded\")\n return url\n\n# Get the instance returned from an async function\n# c = makeRequest(\"www.baidu.com\")\n\n# Establish event loop\n# loop = asyncio.get_event_loop()\n\n# register the instance into loop\n# loop.run_until_complete(c)\n\n# # using task, depend on loop\n# loop = asyncio.get_event_loop()\n# # establish a task instance\n# task = loop.create_task(c)\n# print(task)\n\n# loop.run_until_complete(task)\n\n# print(task)\n\n# using future, does not depend on loop\n# loop = asyncio.get_event_loop()\n# task = asyncio.ensure_future(c)\n# print(task)\n# loop.run_until_complete(task)\n# print(task)\n\ndef callbackFunc(task):\n print(task.result())\n\n# Initate multiple task instances\ntasks = []\nfor u in urls:\n c = makeRequest(u)\n tasks.append(asyncio.ensure_future(c))\n\n\nloop = asyncio.get_event_loop()\n# task = asyncio.ensure_future(c)\n# callback\n# task.add_done_callback(callbackFunc)\nloop.run_until_complete(asyncio.wait(tasks))\n", "repo_name": "YufeiLinUlysses/LearnDataScience", "sub_path": "Data Collection/Web_Crawler/asyncCrawler2.py", "file_name": "asyncCrawler2.py", "file_ext": "py", "file_size_in_byte": 1153, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "asyncio.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 45, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 48, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "8060624317", "text": "from fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nimport sentry_sdk\nfrom sentry_sdk.integrations.starlette import StarletteIntegration\nfrom sentry_sdk.integrations.fastapi import FastApiIntegration\n\nfrom src.driver.session import SessionManager\nfrom src.routers import (\n user_router,\n product_router,\n predictor_router,\n)\nfrom .di import injector\nfrom .exceptions import BaseException\nfrom .constants import (\n SENTRY_DSN,\n APP_ENV,\n)\n\napp = FastAPI()\n\nsentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n StarletteIntegration(),\n FastApiIntegration(),\n ],\n environment=APP_ENV,\n send_default_pii=True,\n attach_stacktrace=True,\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production,\n traces_sample_rate=0.5,\n)\n\napp.include_router(user_router)\napp.include_router(product_router)\napp.include_router(predictor_router)\napp.add_middleware(\n CORSMiddleware,\n allow_origins = [\"*\"],\n allow_credentials=True,\n allow_methods=['*'],\n 
allow_headers=['*'],\n)\n\n\n@app.on_event(\"startup\")\ndef startup():\n client = injector.get(SessionManager).get_client()\n print(\"[Startup] Connecting to the MongoDB database ..\")\n print(client.server_info())\n print(\"[Startup] Connected to the MongoDB database!\")\n\n\n@app.on_event(\"shutdown\")\ndef shutdown():\n client = injector.get(SessionManager).get_client()\n print(\"[Shutdown] Disconnecting from the MongoDB database ..\")\n client.close()\n print(\"[Shutdown] Disconnected from the MongoDB database!\")\n\n\n@app.exception_handler(Exception)\ndef unicorn_exception_handler(request: Request, exc: BaseException):\n code = getattr(exc, \"code\", 500)\n message = getattr(exc, \"message\", str(exc))\n print(code, message)\n return JSONResponse(\n status_code=code,\n content=message,\n )\n\n\n@app.get(\"/ping\")\nasync def ping():\n return {\"message\": \"pong\"}\n", "repo_name": "devcamp18/group18-monorepo", "sub_path": "backend/src/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2057, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 21, "usage_type": "call"}, {"api_name": "sentry_sdk.init", "line_number": 23, "usage_type": "call"}, {"api_name": "constants.SENTRY_DSN", "line_number": 24, "usage_type": "name"}, {"api_name": "sentry_sdk.integrations.starlette.StarletteIntegration", "line_number": 26, "usage_type": "call"}, {"api_name": "sentry_sdk.integrations.fastapi.FastApiIntegration", "line_number": 27, "usage_type": "call"}, {"api_name": "constants.APP_ENV", "line_number": 29, "usage_type": "name"}, {"api_name": "src.routers.user_router", "line_number": 38, "usage_type": "argument"}, {"api_name": "src.routers.product_router", "line_number": 39, "usage_type": "argument"}, {"api_name": "src.routers.predictor_router", "line_number": 40, "usage_type": "argument"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 42, "usage_type": "argument"}, {"api_name": "di.injector.get", "line_number": 52, "usage_type": "call"}, {"api_name": "src.driver.session.SessionManager", "line_number": 52, "usage_type": "argument"}, {"api_name": "di.injector", "line_number": 52, "usage_type": "name"}, {"api_name": "di.injector.get", "line_number": 60, "usage_type": "call"}, {"api_name": "src.driver.session.SessionManager", "line_number": 60, "usage_type": "argument"}, {"api_name": "di.injector", "line_number": 60, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 67, "usage_type": "name"}, {"api_name": "exceptions.BaseException", "line_number": 67, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "43279855380", "text": "\"\"\"adding changes to upvote and downvote\n\nRevision ID: d0107b64a60f\nRevises: 773c607ae25f\nCreate Date: 2022-05-11 13:16:40.515449\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd0107b64a60f'\ndown_revision = '773c607ae25f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('comments', sa.Column('pitch_id', sa.Integer(), nullable=True))\n op.drop_constraint('comments_pitch_fkey', 'comments', type_='foreignkey')\n op.create_foreign_key(None, 'comments', 'pitches', ['pitch_id'], ['id'])\n op.drop_column('comments', 'pitch')\n op.add_column('downvotes', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'downvotes', 'users', ['user_id'], ['id'])\n op.add_column('upvotes', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'upvotes', 'users', ['user_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'upvotes', type_='foreignkey')\n op.drop_column('upvotes', 'user_id')\n op.drop_constraint(None, 'downvotes', type_='foreignkey')\n op.drop_column('downvotes', 'user_id')\n op.add_column('comments', sa.Column('pitch', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'comments', type_='foreignkey')\n op.create_foreign_key('comments_pitch_fkey', 'comments', 'pitches', ['pitch'], ['id'])\n op.drop_column('comments', 'pitch_id')\n # ### end Alembic commands ###\n", "repo_name": "Fridah-kalee/Pitches", "sub_path": "migrations/versions/d0107b64a60f_adding_changes_to_upvote_and_downvote.py", "file_name": "d0107b64a60f_adding_changes_to_upvote_and_downvote.py", "file_ext": "py", "file_size_in_byte": 1647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_constraint", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": 
"name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op.drop_constraint", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "32061455293", "text": "import numpy as np \nimport scipy.io.wavfile as wav\nimport matplotlib.pyplot as plt\n\ns=np.random.uniform(-1,1,499)\nn=np.arange(0,499,1)\nx1=np.sin(0.1*np.pi*n)\nx=np.sin(0.1*np.pi*n)+s\nplt.stem(n,x1)\nplt.show()\n\nplt.stem(n,x)\nplt.show()\nk=np.arange(-499,498)\nac=sig.correlate(x,x)\nplt.plot(k,ac)\nplt.show()\n\ni=np.ones(500)\n\nk1=np.arange(-499,499)\ncc=sig.correlate(s,i)\nplt.plot(k1,cc)\nplt.show()\n\n", "repo_name": "yashbee313839/DSP-Lab", "sub_path": "Time domain representation of LTI systems/autocorrelation.py", "file_name": "autocorrelation.py", "file_ext": "py", "file_size_in_byte": 394, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.uniform", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.stem", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.stem", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "777497607", "text": "''' \n個人股票、外匯清單\n'''\nfrom pymongo import MongoClient\nimport urllib.parse\nimport datetime\nimport stockprice\nimport EXRate\nfrom EXRate import *\ncurrency_list = { \n \"USD\" : \"美元\",\n \"JPY\": \"日圓\",\n \"HKD\" :\"港幣\",\n \"GBP\": \"英鎊\",\n \"AUD\": \"澳幣\",\n \"CAD\" : \"加拿大幣\",\n \"CHF\" : \"瑞士法郎\", \n \"SGD\" : \"新加坡幣\",\n \"ZAR\" : \"南非幣\",\n \"SEK\" : \"瑞典幣\",\n \"NZD\" : \"紐元\", \n \"THB\" : \"泰幣\", \n \"PHP\" : \"菲國比索\", \n \"IDR\" : \"印尼幣\", \n \"KRW\" : \"韓元\", \n \"MYR\" : \"馬來幣\", \n \"VND\" : \"越南盾\", \n \"CNY\" : \"人民幣\",\n }\n \n# Authentication Database認證資料庫\nstockDB='mystock'\ncurrencyDB = 'mycurrency'\n# DB connection\ndef constructor_stock(): \n client = MongoClient(\"URL\")\n db = client[stockDB]\n return db\n\ndef constructor_currency():\n client = MongoClient(\"URL\")\n db = client[currencyDB]\n return db\n\n# ---------------------------- 新增 修改 刪除前 先確認股票清單中是否有該檔股票--------------------------\ndef query_stock(user_name, stockNumber):\n db = constructor_stock()\n collect = db[user_name]\n dataList = collect.find_one({\"favorite_stock\": stockNumber})\n if dataList == None:\n print(\"none\")\n return dataList\n\n#----------------------------更新暫存的股票名稱--------------------------\ndef update_my_stock(user_name, stockNumber, condition , target_price):\n db=constructor_stock()\n collect = db[user_name]\n collect.update_many({\"favorite_stock\": stockNumber }, {'$set': {'condition':condition , \"price\": target_price}})\n content = f\"股票{stockNumber}更新成功\"\n return content\n\n# ---------------- 秀出使用者的股票清單 ----------------\ndef show_my_stock(userID, user_name, msg):\n db = constructor_stock()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的股票清單為空,請透過指令新增股票至清單中\"\n content = \"\"\n for i in range(len(dataList)):\n content += stockprice.getprice(dataList[i][\"favorite_stock\"], msg)\n return content\n# ----------- 新增使用者的股票 -------------\ndef write_my_stock(userID, user_name, stockNumber, condition , target_price):\n db=constructor_stock()\n collect = db[user_name]\n is_exit = collect.find_one({\"favorite_stock\": stockNumber})\n if is_exit != None :\n content = update_my_stock(user_name, stockNumber, condition , target_price)\n return content\n else:\n collect.insert_one({\n \"userID\": userID,\n \"favorite_stock\": stockNumber,\n \"condition\" : condition,\n \"price\" : target_price,\n \"tag\": \"stock\",\n \"date_info\": datetime.datetime.now()\n })\n return f\"{stockNumber}已新增至您的股票清單\"\n \n# ---------------- 刪除使用者特定的股票 ----------------\ndef delete_my_stock(user_name, stockNumber):\n db = constructor_stock()\n collect = db[user_name]\n collect.delete_one({'favorite_stock': stockNumber})\n return stockNumber + \"刪除成功\"\n\n# ---------------- 刪除使用者股票清單內所有的股票 ----------------\ndef delete_my_allstock(user_name, userID):\n db = constructor_stock()\n collect = db[user_name]\n collect.delete_many({'userID': userID})\n return \"全部股票刪除成功\"\n\n# ---------------- 秀出使用者的股票條件 ----------------\ndef show_stock_setting(user_name, userID):\n db = constructor_stock()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的股票清單為空,請透過指令新增股票至清單中\"\n content = \"您清單中的選股條件為: \\n\"\n for i in 
range(len(dataList)):\n content += f'{dataList[i][\"favorite_stock\"]} {dataList[i][\"condition\"]} {dataList[i][\"price\"]}\\n'\n return content\n\n#---------------------------- 更新匯率清單的匯率 --------------------------\ndef update_my_currency(user_name, currency, condition , target_price):\n db=constructor_currency()\n collect = db[user_name]\n collect.update_many({\"favorite_currency\": currency }, {'$set': {'condition':condition , \"price\": target_price}})\n return f\"{currency_list[currency]}更新成功\"\n\n#---------------------------- 新增匯率至匯率清單 --------------------------\ndef write_my_currency(userID , user_name, currency, condition, target_price):\n db = constructor_currency()\n collect = db[user_name]\n is_exit = collect.find_one({\"favorite_currency\": currency})\n content = \"\"\n if is_exit != None : return update_my_currency(user_name, currency, condition , target_price)\n else:\n collect.insert_one({\n \"userID\": userID,\n \"favorite_currency\": currency,\n \"condition\" : condition,\n \"price\" : target_price,\n \"tag\": \"currency\",\n \"date_info\": datetime.datetime.now()\n })\n return f\"{currency_list[currency]}已新增至您的外幣清單\"\n\n#---------------------------- 查詢匯率清單的匯率(文字) --------------------------\ndef show_my_currency(userID, user_name):\n db = constructor_currency()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的外幣清單為空,請透過指令新增外幣至清單中\"\n content = \"\"\n for i in range(len(dataList)):\n content += EXRate.showCurrency(dataList[i][\"favorite_currency\"]) \n return content\n\n\n# ---------------- 刪除使用者清單特定的匯率 ----------------\ndef delete_my_currency(user_name, currency):\n db = constructor_currency()\n collect = db[user_name]\n collect.delete_one({'favorite_currency': currency})\n return currency_list[currency] + \"刪除成功\"\n\n\n#---------------------------- 刪除匯率清單全部匯率 --------------------------\ndef delete_my_allcurrency(user_name, userID):\n db = constructor_currency()\n collect = db[user_name]\n collect.delete_many({'userID': userID})\n return \"外幣清單已清空\"\n\n# ---------------- 查詢使用者的匯率設定條件 ----------------\ndef show_currency_setting(user_name, userID):\n db = constructor_currency()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的外幣清單為空,請透過指令新增外幣至清單中\"\n content = \"您清單中外幣篩選條件為: \\n\"\n for i in range(len(dataList)):\n content += f'{dataList[i][\"favorite_currency\"]} {dataList[i][\"condition\"]} {dataList[i][\"price\"]}\\n'\n return content\n", "repo_name": "ChenTsungYu/stock_linebot_public", "sub_path": "mongodb.py", "file_name": "mongodb.py", "file_ext": "py", "file_size_in_byte": 6880, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 36, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 41, "usage_type": "call"}, {"api_name": "stockprice.getprice", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "attribute"}, {"api_name": "EXRate.showCurrency", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "72547979713", "text": "import sys\nimport os\nimport bpy\nimport json\nimport argparse\n\n# Get the path of the 
PLUME directory\nif os.path.dirname(__file__) not in sys.path:\n    sys.path.append(os.path.dirname(__file__))\n\n# Fetch the blender directory\nblend_dir = os.path.dirname(bpy.data.filepath)\nif blend_dir not in sys.path:\n    sys.path.append(blend_dir)\n\nfrom config import Config, Color\n\nclass MeshGeneration:\n    def __init__(self, generation_name_p, index_p) -> None:\n        self.generation_name = str(generation_name_p)\n        self.index = str(index_p)\n        self.path = Config.PLUME_DIR.value+\"/data/raw_data/\"+self.generation_name+\"/\"+self.index+\"/data.json\"\n        self.saved_mesh_path = Config.PLUME_DIR.value+\"/data/mesh_files/\"+self.generation_name+\"/\"+self.index+\"/mesh.obj\"\n        self.json_file = open(self.path)\n        self.data = json.load(self.json_file)\n        self.obj = None\n        self.mesh = None\n        self.generate_mesh()\n        exit()\n\n\n    def generate_mesh(self):\n        \"\"\"\n        Main function to create the mesh\n        \"\"\"\n        self.initial_cleanup()\n        verts, edges = self.extract_mesh_data()\n        result_loading = self.load_mesh_in_blender(verts_p=verts, edges_p=edges)\n        if result_loading == -1:\n            print(f\"{Color.FAIL.value}There was a problem while creating the mesh{Color.ENDC.value}\")\n            exit()\n        self.blender_modifiers()\n        self.flip_normals()\n        self.export_mesh()\n        self.json_file.close()\n\n\n    def initial_cleanup(self):\n        \"\"\"\n        Remove the default objects of Blender\n        \"\"\"\n        bpy.ops.object.select_all(action='DESELECT')\n        bpy.data.objects['Cube'].select_set(True)\n        bpy.ops.object.delete()\n        bpy.data.objects['Camera'].select_set(True)\n        bpy.ops.object.delete()\n        bpy.data.objects['Light'].select_set(True)\n        bpy.ops.object.delete()\n\n\n    def extract_mesh_data(self):\n        \"\"\"\n        Load graph data into python variables (from a dictionary)\n        -Use [node_id][\"coordinates\"][\"x\"] or [\"y\"]\n        \"\"\"\n        verts, edges = [], []\n        for i in self.data:\n            verts.append([\n                self.data[i][\"coordinates\"]['x'],\n                self.data[i][\"coordinates\"]['y'],\n                0.0\n            ])\n            for child in self.data[i]['children']:\n                edges.append([\n                    self.data[i]['id']-1,\n                    child-1\n                ])    \n        return verts, edges\n    \n\n    def load_mesh_in_blender(self, verts_p, edges_p):\n        \"\"\"\n        Load the vertices and edges in blender\n        \"\"\"\n        verts = verts_p\n        edges = edges_p\n        self.mesh = bpy.data.meshes.new('Underground')\n        self.obj = bpy.data.objects.new('Underground', self.mesh)\n        col = bpy.data.collections.get(\"Collection\")\n        col.objects.link(self.obj)\n        bpy.context.view_layer.objects.active = self.obj\n\n        self.mesh.from_pydata(verts, edges, [])\n        if not self.mesh:\n            return -1\n\n\n    def blender_modifiers(self):\n        \"\"\"\n        Create and apply modifiers\n        \"\"\"\n        mod_sub = bpy.ops.object.modifier_add(type='SUBSURF')\n        mod_skin = self.obj.modifiers.new('Skin', 'SKIN')\n        mod_sub = bpy.ops.object.modifier_add(type='SUBSURF')\n\n        # Apply modifiers\n        apply_mod = bpy.ops.object.modifier_apply(modifier='Subdivision')\n        apply_mod = bpy.ops.object.modifier_apply(modifier='Skin') # Create a mesh skin around the graph\n        apply_mod = bpy.ops.object.modifier_apply(modifier='Subdivision.001')\n\n\n    def flip_normals(self):\n        \"\"\"\n        Flip the normals\n        \"\"\"\n        bpy.ops.object.editmode_toggle()\n        bpy.ops.mesh.select_all(action='SELECT') # Select all faces\n        bpy.ops.mesh.flip_normals() # just flip normals\n\n\n    def export_mesh(self):\n        \"\"\"\n        Export the mesh in the desired format\n        \"\"\"\n        bpy.data.objects['Underground'].select_set(True)\n\n        # Make sure the directory exists\n        if not os.path.exists(os.path.dirname(self.saved_mesh_path)):\n            os.makedirs(os.path.dirname(self.saved_mesh_path))\n\n        # Export the 
mesh\n        bpy.ops.wm.obj_export(filepath=self.saved_mesh_path,\n                              export_selected_objects=True)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description=\"PLUME project. Mesh generation based on a json file that provides data as x,y,z coordinates. Then based on the created structure, apply a circular skin around it to create the edges of the underground mesh.\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    \n    parser.add_argument(\"-index\", help=\"Index used for the path\", type=int)\n    parser.add_argument(\"-name\", help=\"Name of the current graph generation\", type=str)\n    parser.add_argument(\"--background\", action=\"store_true\", help=\"Run the script without GUI\")\n    parser.add_argument(\"--python\", action=\"store_true\", help=\"Run blender with a python file\")\n    parser.add_argument(\"file\", help=\"Path and name of the python file\")\n    \n\n    args = parser.parse_args()\n    arguments = vars(args)\n    generator = MeshGeneration(index_p=arguments['index'],\n                               generation_name_p=arguments['name'])", "repo_name": "Gabryss/P.L.U.M.E", "sub_path": "src/blender.py", "file_name": "blender.py", "file_ext": "py", "file_size_in_byte": 5125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.Config.PLUME_DIR", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 22, "usage_type": "name"}, {"api_name": "config.Config.PLUME_DIR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 23, "usage_type": "name"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "config.Color.FAIL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "config.Color", "line_number": 40, "usage_type": "name"}, {"api_name": "config.Color.ENDC", "line_number": 40, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.select_all", "line_number": 52, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 53, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.delete", "line_number": 54, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 54, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 55, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.delete", "line_number": 56, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 56, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 57, "usage_type": "attribute"}, {"api_name": 
"bpy.ops.object.delete", "line_number": 58, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 58, "usage_type": "attribute"}, {"api_name": "bpy.data.meshes.new", "line_number": 87, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 87, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.new", "line_number": 88, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 88, "usage_type": "attribute"}, {"api_name": "bpy.data.collections.get", "line_number": 89, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 89, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 91, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_add", "line_number": 102, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 102, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_add", "line_number": 104, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 104, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 107, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 107, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 108, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 108, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 109, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 109, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.editmode_toggle", "line_number": 116, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 116, "usage_type": "attribute"}, {"api_name": "bpy.ops.mesh.select_all", "line_number": 117, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 117, "usage_type": "attribute"}, {"api_name": "bpy.ops.mesh.flip_normals", "line_number": 118, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 118, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 128, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "bpy.ops.wm.obj_export", "line_number": 132, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 132, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 137, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 139, "usage_type": "attribute"}]} +{"seq_id": "1075856269", "text": "from selenium.common.exceptions import InvalidArgumentException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass BasePage():\n def __init__(self, browser, url):\n self.browser = browser\n self.url = url\n\n def open(self):\n try:\n self.browser.get(self.url)\n except InvalidArgumentException:\n raise AssertionError('Не удалось открыть сайт')\n\n def is_present(self, locator, timer=10):\n try:\n wait = EC.presence_of_element_located(locator)\n element = WebDriverWait(self.browser, timer).until(wait)\n return element\n except 
TimeoutException:\n raise AssertionError('Элемент не найден')\n\n def is_clickable(self, locator, timer=10):\n try:\n wait = EC.element_to_be_clickable(locator)\n element = WebDriverWait(self.browser, timer).until(wait)\n return element\n except TimeoutException:\n raise AssertionError('Элемент не активен или отсутствует')\n\n def click_gently(self, locator, timer=10):\n self.is_clickable(locator).click()\n\n def check_url(self, url):\n return url.lower() == self.browser.current_url\n", "repo_name": "Hellsingoff/SeleniumTest", "sub_path": "features/steps/pages/base_page.py", "file_name": "base_page.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "selenium.common.exceptions.InvalidArgumentException", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "4242139661", "text": "# Veri önişleme ve analizi için kullanılacak kütüphane eklenir\nimport pandas as pd\n\n# Kırmızı şarapların olduğu veri seti okunur\nred_wine_dataset = pd.read_csv('../input/qualityofredwine/winequality-red.csv')\n\n# Beyaz şarapların olduğu veri seti okunur\nwhite_wine_dataset = pd.read_csv('../input/qualityofwhitewine/winequality-white.csv')\n\n# Red şarapların olduğu veri setinin her bir sütununda toplam kaç tane boş kayıt olduğuna bakılır\nred_wine_dataset.isnull().sum()\n\n# Red şarapların olduğu veri setindeki tahmin edilecek değerin kaç farklı değer ve bu değerlerin toplam adetlerine bakılır\nred_wine_dataset.quality.value_counts()\n\n# Beyaz şarapların olduğu veri setinin her bir sütununda toplam kaç tane boş kayıt olduğuna bakılır\nwhite_wine_dataset.isnull().sum()\n\n# Beyaz şarapların olduğu veri setindeki tahmin edilecek değerin kaç farklı değer ve bu değerlerin toplam adetlerine bakılır\nwhite_wine_dataset.quality.value_counts()\n\n# Kırmızı ve beyaz şarapların olduğu veri setleri ayrı ayrı olarak parçalanır\n# X değişkenlerinde veri setlerin öznitelikleri ve y değişkenlerinde de tahmin edilecek değer yer alır\nred_wine_X = red_wine_dataset.iloc[:,:-1].values\nred_wine_y = red_wine_dataset.iloc[:,-1].values\n\nwhite_wine_X = white_wine_dataset.iloc[:,:-1].values\nwhite_wine_y = white_wine_dataset.iloc[:,-1].values\n\n# Kırmızı ve beyaz şarap veri setleri, eğitim ve test verileri olarak ayrılır\nfrom sklearn.model_selection import train_test_split\nred_wine_X_train, red_wine_X_test, red_wine_y_train, red_wine_y_test = train_test_split(red_wine_X, red_wine_y, test_size = 0.20, random_state = 4)\nwhite_wine_X_train, white_wine_X_test, white_wine_y_train, white_wine_y_test = train_test_split(white_wine_X, 
white_wine_y, test_size = 0.20, random_state = 4)\n\n# Back up the test sets so the data frames can be merged after the predictions\ncache_red_wine_X_test = red_wine_X_test\ncache_red_wine_y_test = red_wine_y_test\ncache_white_wine_X_test = white_wine_X_test\ncache_white_wine_y_test = white_wine_y_test\n\n# Since the correlations between the features are low, scale the features of the datasets\nfrom sklearn.preprocessing import StandardScaler\nred_wine_sc = StandardScaler()\nred_wine_X_train = red_wine_sc.fit_transform(red_wine_X_train)\nred_wine_X_test = red_wine_sc.transform(red_wine_X_test)\nwhite_wine_sc = StandardScaler()\nwhite_wine_X_train = white_wine_sc.fit_transform(white_wine_X_train)\nwhite_wine_X_test = white_wine_sc.transform(white_wine_X_test)\n\n# Convert the numeric values in the y variables to categorical\nred_wine_y_train = red_wine_y_train.astype('object')\nred_wine_y_test = red_wine_y_test.astype('object')\nwhite_wine_y_train = white_wine_y_train.astype('object')\nwhite_wine_y_test = white_wine_y_test.astype('object')\n\n# Encode the categorical data as integers\nfrom sklearn.preprocessing import LabelEncoder\nred_wine_labelencoder_y = LabelEncoder()\nred_wine_y_train = red_wine_labelencoder_y.fit_transform(red_wine_y_train)\nred_wine_y_test = red_wine_labelencoder_y.transform(red_wine_y_test)\nwhite_wine_labelencoder_y = LabelEncoder()\nwhite_wine_y_train = white_wine_labelencoder_y.fit_transform(white_wine_y_train)\nwhite_wine_y_test = white_wine_labelencoder_y.transform(white_wine_y_test)\n\n# One-hot encode the integer-coded categorical data\nfrom keras.utils import np_utils\nred_wine_y_train = np_utils.to_categorical(red_wine_y_train)\nred_wine_y_test = np_utils.to_categorical(red_wine_y_test)\nwhite_wine_y_train = np_utils.to_categorical(white_wine_y_train)\nwhite_wine_y_test = np_utils.to_categorical(white_wine_y_test)\n\n# Import Keras model and layer libraries\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n\n# Create a Sequential model for the red wine dataset and add 4 layers to it\nred_wine_classifier = Sequential()\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nred_wine_classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'softmax'))\n\n# Compile the model with the relevant parameters\nred_wine_classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Train this model with the training and test data\nred_wine_history = red_wine_classifier.fit(red_wine_X_train, red_wine_y_train, epochs = 200, batch_size = 128, verbose = 2, validation_data = (red_wine_X_test, red_wine_y_test))\n\n# Create a Sequential model for the white wine dataset and add 4 layers to it\nwhite_wine_classifier = Sequential()\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nwhite_wine_classifier.add(Dense(units = 7, kernel_initializer = 'uniform', activation = 'softmax'))\n\n# Compile the model with the relevant 
parameters\nwhite_wine_classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Train this model with the training and test data\nwhite_wine_history = white_wine_classifier.fit(white_wine_X_train, white_wine_y_train, epochs = 200, batch_size = 128, verbose = 2, validation_data = (white_wine_X_test, white_wine_y_test))\n\n# Import the library used for data visualization\nfrom matplotlib import pyplot as plt\n\n# Plot the accuracy of the red and white wine models over the course of training\nplt.figure(figsize=(15,3))\nplt.subplot(1, 2, 1)\n\nplt.plot(red_wine_history.history['acc'])\nplt.plot(red_wine_history.history['val_acc'])\nplt.title('Red Wine Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='lower right')\n\nplt.figure(figsize=(15,3))\nplt.subplot(1, 2, 1)\n\nplt.plot(white_wine_history.history['acc'])\nplt.plot(white_wine_history.history['val_acc'])\nplt.title('White Wine Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='lower right')\nplt.show()\n\n# Run predictions with the model trained for red wine\nred_wine_y_pred = red_wine_classifier.predict(red_wine_X_test)\n\n# Run predictions with the model trained for white wine\nwhite_wine_y_pred = white_wine_classifier.predict(white_wine_X_test)\n\n# Convert the red wine predictions to one-hot form over 6 classes in the loop below\nred_wine_max_i = red_wine_y_pred.max(axis=1)\nfor i in range(len(red_wine_y_pred)):\n    for j in range(6):\n        if red_wine_y_pred[i,j] == red_wine_max_i[i]:\n            red_wine_y_pred[i,j] = 1\n        else:\n            red_wine_y_pred[i,j] = 0\n    \n# Convert the white wine predictions to one-hot form over 7 classes in the loop below\nwhite_wine_max_i = white_wine_y_pred.max(axis=1)\nfor i in range(len(white_wine_y_pred)):\n    for j in range(7):\n        if white_wine_y_pred[i,j] == white_wine_max_i[i]:\n            white_wine_y_pred[i,j] = 1\n        else:\n            white_wine_y_pred[i,j] = 0\n    \n# Compute the overall accuracy of the red wine predictions\nred_wine_crt_values = (red_wine_y_pred == red_wine_y_test).sum()\nred_wine_wrong_values = (red_wine_y_pred != red_wine_y_test).sum()\nred_wine_total = red_wine_crt_values+red_wine_wrong_values\nred_wine_result = red_wine_crt_values/red_wine_total\nprint(red_wine_result)\n\n# Compute the overall accuracy of the white wine predictions\nwhite_wine_crt_values = (white_wine_y_pred == white_wine_y_test).sum()\nwhite_wine_wrong_values = (white_wine_y_pred != white_wine_y_test).sum()\nwhite_wine_total = white_wine_crt_values+white_wine_wrong_values\nwhite_wine_result = white_wine_crt_values/white_wine_total\nprint(white_wine_result)\n\n# Import the Python library used to compute confusion matrices\nfrom sklearn.metrics import confusion_matrix\n# Import the Python library used for mathematical operations\nimport numpy as np\n\n# Build a confusion matrix to inspect the red wine prediction accuracy\nred_wine_y_test = [np.where(r==1)[0][0] for r in red_wine_y_test]\nred_wine_y_pred = [np.where(r==1)[0][0] for r in red_wine_y_pred]\nred_wine_cm = confusion_matrix(red_wine_y_test,red_wine_y_pred)\nprint(red_wine_cm)\n\n# Build a confusion matrix to inspect the white wine prediction accuracy\nwhite_wine_y_test = [np.where(r==1)[0][0] for r in white_wine_y_test]\nwhite_wine_y_pred = [np.where(r==1)[0][0] for r in white_wine_y_pred]\nwhite_wine_cm = confusion_matrix(white_wine_y_test, white_wine_y_pred)\nprint(white_wine_cm)\n\n# 
Merge the red and white wine predictions back with the original tables\nred_wine_X_test = pd.DataFrame(cache_red_wine_X_test)\nred_wine_y_test = pd.DataFrame(red_wine_y_test)\nred_wine_y_pred = pd.DataFrame(red_wine_y_pred)\n\nred_wine_X_test = pd.concat([red_wine_X_test, red_wine_y_test], axis=1)\nred_wine_X_test = pd.concat([red_wine_X_test, red_wine_y_pred], axis=1)\nred_wine_X_test.columns = [\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\",\"real quality\",\"predicted quality\"]\n\nwhite_wine_X_test = pd.DataFrame(cache_white_wine_X_test)\nwhite_wine_y_test = pd.DataFrame(white_wine_y_test)\nwhite_wine_y_pred = pd.DataFrame(white_wine_y_pred)\nwhite_wine_X_test = pd.concat([white_wine_X_test, white_wine_y_test], axis=1)\nwhite_wine_X_test = pd.concat([white_wine_X_test, white_wine_y_pred], axis=1)\nwhite_wine_X_test.columns = [\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\",\"real quality\",\"predicted quality\"]\n\nred_wine_X_test.to_csv('prediction-of-red-wine-quality.csv')\nwhite_wine_X_test.to_csv('prediction-of-white-wine-quality.csv')\n", "repo_name": "burakcantimucin/WineQualityPrediction", "sub_path": "wine-quality-prediction-with-keras.py", "file_name": "wine-quality-prediction-with-keras.py", "file_ext": "py", "file_size_in_byte": 10221, "program_lang": "python", "lang": "tr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 69, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 70, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 91, "usage_type": "call"}, {"api_name": 
"keras.layers.Dense", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 173, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 180, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 184, 
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 188, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 194, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 195, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "42842001518", "text": "from rest_framework import filters, mixins, viewsets\nfrom rest_framework.parsers import MultiPartParser\n\nfrom src.api.images.models import UploadedImages\nfrom src.api.images.serializers import ImageSerializer\n\n\nclass ImageViewSet(\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet,\n):\n serializer_class = ImageSerializer\n queryset = UploadedImages.objects.all()\n\n lookup_field = \"id\"\n\n parser_classes = (MultiPartParser,)\n\n filter_backends = [\n filters.SearchFilter,\n ]\n search_fields = [\n \"title\",\n ]\n", "repo_name": "chilledsnake/img_api", "sub_path": "src/api/images/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 12, "usage_type": "name"}, {"api_name": "src.api.images.serializers.ImageSerializer", "line_number": 14, "usage_type": "name"}, {"api_name": "src.api.images.models.UploadedImages.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "src.api.images.models.UploadedImages.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "src.api.images.models.UploadedImages", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.parsers.MultiPartParser", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.filters", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "4070576906", "text": "from django.conf import settings\nfrom django.conf.urls.defaults import *\nfrom django.views.generic.simple import direct_to_template\nfrom mysite.liga.sitemap import TeamSitemap, TableResultsSitemap, PlayerSitemap, StaticSitemap\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom pinax.apps.account.openid_consumer import PinaxConsumer\n\n\nhandler500 = \"pinax.views.server_error\"\n\nsitemaps={\n \"team\":TeamSitemap,\n \"tableresults\":TableResultsSitemap,\n \"player\":PlayerSitemap,\n 
\"static\":StaticSitemap,\n}\n\nurlpatterns = patterns(\"\",\n #url(r\"^$\", direct_to_template, {\n # \"template\": \"homepage.html\",\n #}, name=\"home\"),\n url(r\"^$\", include(\"liga.urls\")),\n url(r\"^tabela/$\", direct_to_template, {\"template\": \"tabela.html\"}, name=\"tabela\"),\n url(r\"^profil/(?P\\w+)$\", 'mysite.liga.views.manage_profil'),\n url(r\"^tabela/pdf$\", 'mysite.liga.views.create_pdf'),\n url(r\"^druzyny$\", 'mysite.liga.views.manage_team'),\n url(r\"^mecze/(?P\\w+)$\", 'mysite.liga.views.manage_match'),\n url(r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n url(r\"^admin/invite_user/$\", \"pinax.apps.signup_codes.views.admin_invite_user\", name=\"admin_invite_user\"),\n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^about/\", include(\"about.urls\")),\n url(r\"^account/\", include(\"pinax.apps.account.urls\")),\n url(r\"^openid/\", include(PinaxConsumer().urls)),\n)\n\n\nif settings.SERVE_MEDIA:\n urlpatterns += patterns(\"\",\n url(r\"\", include(\"staticfiles.urls\")),\n )\n", "repo_name": "winx88/ProjectsPythonDjango", "sub_path": "Liga/mysite/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 7, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "mysite.liga.sitemap.TeamSitemap", "line_number": 15, "usage_type": "name"}, {"api_name": "mysite.liga.sitemap.TableResultsSitemap", "line_number": 16, "usage_type": "name"}, {"api_name": "mysite.liga.sitemap.PlayerSitemap", "line_number": 17, "usage_type": "name"}, {"api_name": "mysite.liga.sitemap.StaticSitemap", "line_number": 18, "usage_type": "name"}, {"api_name": "django.views.generic.simple.direct_to_template", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}, {"api_name": "pinax.apps.account.openid_consumer.PinaxConsumer", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.settings.SERVE_MEDIA", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "2749950865", "text": "import asyncio\nimport os\n\nimport discord\nimport pymongo\nfrom discord.ext import commands\nfrom pymongo import MongoClient\n\nfrom __main__ import logger\n\ncluster = MongoClient(os.getenv('MONGO_CONNECTION_URL'))\n\ndb = cluster['UserData']\n\ncollection = db['UserData']\n\n\nclass Db(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f'{self.__class__.__name__} Cog has been loaded\\n-----')\n\n def post(post_data: dict):\n mq = {\"_id\": post_data['_id']}\n if (collection.count_documents(mq) == 0):\n collection.insert_one(post_data)\n else:\n q = {\"_id\": post_data['_id']}\n user = collection.find(q)\n for r in user:\n score = r['score']\n score = score + 1\n test = post_data['is_admin']\n collection.update_one({\"_id\": post_data['_id']}, {\"$set\": {\"score\": score}})\n collection.update_one({\"_id\": post_data['_id']}, {\"$set\": {\"is_admin\": test}})\n logger.info(f'Posted data to DB! 
Data: \"{post_data}\"')\n\n\ndef setup(bot):\n bot.add_cog(Db(bot))\n", "repo_name": "RandomRuskiy/brads-server-bot", "sub_path": "cogs/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 18, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 18, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 23, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 23, "usage_type": "name"}, {"api_name": "__main__.logger.info", "line_number": 40, "usage_type": "call"}, {"api_name": "__main__.logger", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "10182332159", "text": "from keras.models import Sequential, Model, model_from_json, load_model\nfrom keras.layers import Conv1D, MaxPool1D, Dropout, Flatten, Dense, Activation, Add, Input, AveragePooling1D, Concatenate, Lambda\nfrom keras.callbacks import ModelCheckpoint,CSVLogger,EarlyStopping\nfrom keras.losses import binary_crossentropy, categorical_crossentropy\nfrom keras import backend as K\nfrom keras.optimizers import SGD,Adam\n\ndef residual_block(x, filters, conv_num=3, activation=\"relu\"):\n s = Conv1D(filters, 1, padding=\"same\")(x)\n for i in range(conv_num - 1):\n x = Conv1D(filters, 3, padding=\"same\")(x)\n x = Activation(activation)(x)\n x = Conv1D(filters, 3, padding=\"same\")(x)\n x = Add()([x, s])\n x = Activation(activation)(x)\n return MaxPool1D(pool_size=2, strides=2)(x)\n\ndef create_base_model(input_shape, embeddings,num_classes):\n inputs = keras.layers.Input(shape=input_shape, name=\"input\")\n\n x = residual_block(inputs, 16, 2)\n x = residual_block(x, 32, 2)\n x = residual_block(x, 64, 3)\n x = residual_block(x, 128, 3)\n x = residual_block(x, 128, 3)\n\n x = AveragePooling1D(pool_size=3, strides=3)(x)\n x = Flatten()(x)\n x = Dense(256, activation=\"relu\")(x)\n x = Dense(units= embeddings, activation=\"relu\",name='embedding')(x)\n\n outputs = Dense(num_classes, activation=\"softmax\", name=\"output\")(x)\n\n base_model=Model(inputs, outputs)\n base_model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=\"Adam\",\n metrics=['accuracy'])\n return base_model\n", "repo_name": "dboursinos/Efficient-Probability-Intervals-Classification-Inductive-Venn-Predictors", "sub_path": "dynamic_taxonomies/Ecobee_Thermostat/cnn_1D.py", "file_name": "cnn_1D.py", "file_ext": "py", "file_size_in_byte": 1551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.layers.Conv1D", "line_number": 9, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.layers.Add", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.MaxPool1D", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.models.layers.Input", 
"line_number": 19, "usage_type": "call"}, {"api_name": "keras.models.layers", "line_number": 19, "usage_type": "attribute"}, {"api_name": "keras.models", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.layers.AveragePooling1D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "28056575221", "text": "import sqlite3\n\nimport pygame.font\n\nfrom commonFunctions import print_text\nfrom commonSettings import *\n\n\ndef db_create():\n\n pass\n\n\ndef db_get_players_scores(level):\n \"\"\"returns table: player_id, name, prev score, record score\"\"\"\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"\"\"\n SELECT p.player_id, p.player_name, rs.score, pls.record_score, MAX(rs.round_id) FROM player p\n LEFT JOIN player_level_scores pls ON p.player_id = pls.player_id AND pls.level_id = ?\n LEFT JOIN round_scores rs ON p.player_id = rs.player_id AND rs.level_id = ?\n GROUP BY p.player_name\n ORDER BY p.player_id ASC\n \"\"\".format(table='t')\n cursor.execute(sql, (level, level))\n db_result = cursor.fetchall()\n #print(\"DB RESULT: \",db_result)\n conn.close()\n return db_result\n\ndef db_get_player_name(id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"SELECT player_name FROM player WHERE player_id = ?\"\n cursor.execute(sql, (id, ))\n db_result = cursor.fetchall()\n conn.close()\n return db_result[0][0]\n\ndef db_get_level_name(id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"SELECT level_name FROM game_levels WHERE gamelevel_id = ?\"\n cursor.execute(sql, (id, ))\n db_result = cursor.fetchall()\n conn.close()\n return db_result[0][0]\n\n\ndef db_add_round_result(player, level, score):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql='INSERT INTO round_scores (player_id, level_id, score) VALUES('+str(player)+', '+str(level)+', '+str(score)+')'\n cursor.execute(sql)\n conn.commit()\n #db_result = cursor.fetchall()\n\n print(\"DB ROUND RESULT UPDATED: \", str(score))\n conn.close()\n\n\ndef db_update_new_record(player, level, score):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = 'SELECT record_score FROM player_level_scores WHERE player_id = '+str(player)+' AND level_id = '+str(level)\n\n cursor.execute(sql)\n db_result = cursor.fetchall()\n #print(\"DB RESULT in RECORD CHECK: \", db_result)\n\n if db_result[0][0] < score:\n sql = 'UPDATE player_level_scores SET record_score='+str(score)+' WHERE player_id= '+str(player)+' AND level_id = '+str(level)\n cursor.execute(sql)\n conn.commit()\n print(\"DB RECORD UPDATED: \", str(score))\n #db_result = cursor.fetchall()\n print(\"DB RECORD NOT UPDATED: \", str(db_result[0][0]))\n conn.close()\n\n pass\n\n\n\n# to Avoid Database errors\n# try:\n# cursor.execute(sql_statement)\n# result = cursor.fetchall()\n# except sqlite3.DatabaseError as err:\n# print(\"Error: \", err)\n# else:\n# conn.commit()", "repo_name": "voscovvo/MineSmart", "sub_path": "database_functions.py", "file_name": "database_functions.py", "file_ext": "py", "file_size_in_byte": 2695, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "9002629903", "text": "from django.urls import path\nfrom . import views\n\napp_name = 'movies'\n\nurlpatterns = [\n path('', views.movie_list, name='movie_list'),\n path('/', views.detail, name=\"detail\"),\n path('/score/new', views.create_score, name='create_score'),\n path('/score//delete', views.delete_score, name=\"delete\"),\n path('/score//update', views.update_score, name=\"update\"),\n path('dbmake/', views.movie_db),\n path('dbmake2/', views.movie_db2),\n]", "repo_name": "Hansung-Lee/SSAFY", "sub_path": "ssafy_project/last_project/movies/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "38525982466", "text": "from deposit_gui.view.vmdiarea_frames.abstract_mdiarea_frame import AbstractMDIAreaFrame\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.relation_frame import RelationFrame\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_table import QueryTabTable\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images import (QueryTabImagesLazy, QueryTabImages)\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph import (QueryTabGraphLazy, QueryTabGraph)\n\nfrom PySide2 import (QtWidgets, QtCore, QtGui)\n\nclass QueryFrame(AbstractMDIAreaFrame, QtWidgets.QFrame):\n\t\n\tsignal_query_selected = QtCore.Signal(list)\t\t# [QueryItem, ...]\n\tsignal_query_activated = QtCore.Signal(object)\t# QueryItem\n\tsignal_object_selected = QtCore.Signal(list)\t# [DObject, ...]\n\tsignal_relation_selected = QtCore.Signal(list)\t# [(Source, Target, label), ...]\n\tsignal_add_object = QtCore.Signal(object)\t# Query\n\tsignal_del_object = QtCore.Signal()\n\tsignal_del_descriptor = QtCore.Signal()\n\tsignal_edited = QtCore.Signal(object, object)\t# QueryItem, value\n\tsignal_drop_url = QtCore.Signal(object, str)\t# QueryItem, url\n\t\n\t# signal_class_link = QtCore.Signal(str)\t\t\t\t# class_name\n\t# signal_relation_link = QtCore.Signal(int, str, str)\t# obj_id, rel_label, class_name\n\t# signal_relation_unlink = QtCore.Signal(int, str, str)\t# obj_id, rel_label, class_name\n\t\n\t\n\tINITIAL_THUMBNAIL_SIZE = 128\n\t\n\tdef __init__(self, query, cmodel, cview):\n\t\t\n\t\tAbstractMDIAreaFrame.__init__(self)\n\t\tQtWidgets.QFrame.__init__(self)\n\t\t\n\t\tself._query = query\n\t\tself._cmodel = cmodel\n\t\tself._cview = cview\n\t\t\n\t\tself.setLayout(QtWidgets.QVBoxLayout())\n\t\tself.layout().setContentsMargins(0, 0, 0, 
0)\n\t\tself.layout().setSpacing(0)\n\t\t\n\t\tself.relation_frame = RelationFrame()\n\t\tself.relation_frame.signal_object_link.connect(self.on_object_link)\n\t\tself.signal_class_link = self.relation_frame.signal_class_link\n\t\tself.signal_relation_link = self.relation_frame.signal_relation_link\n\t\tself.signal_relation_unlink = self.relation_frame.signal_relation_unlink\n\t\t\n\t\tself.footer = QueryFooter(self)\n\t\t\n\t\tself.tab_table = QueryTabTable(self)\n\t\tself.tab_images = QueryTabImagesLazy(self)\n\t\tself.tab_graph = QueryTabGraphLazy(self)\n\t\t\n\t\tself.tabs = QtWidgets.QTabWidget()\n\t\tself.tabs.addTab(self.tab_table, \"Table\")\n\t\tself.tabs.addTab(self.tab_images, \"Images\")\n\t\tself.tabs.addTab(self.tab_graph, \"Graph\")\n\t\t\n\t\tsplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)\n\t\tsplitter.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\t\tself.layout().addWidget(splitter)\n\t\t\n\t\tframe_left = QtWidgets.QFrame()\n\t\tframe_left.setLayout(QtWidgets.QVBoxLayout())\n\t\tframe_left.layout().setContentsMargins(0, 0, 0, 0)\n\t\tframe_left.layout().setSpacing(0)\n\t\tframe_left.layout().addWidget(self.tabs)\n\t\tframe_left.layout().addWidget(self.footer)\n\n\t\tself.scroll_area = QtWidgets.QScrollArea()\n\t\tself.scroll_area.setWidgetResizable(True)\n\t\tself.scroll_area.setFrameStyle(QtWidgets.QFrame.NoFrame)\n\t\tself.scroll_area.setWidget(self.relation_frame)\n\t\t\n\t\tsplitter.addWidget(frame_left)\n\t\tsplitter.addWidget(self.scroll_area)\n\t\t\n\t\tself._filter_timer = QtCore.QTimer()\n\t\tself._filter_timer.setSingleShot(True)\n\t\tself._filter_timer.timeout.connect(self.on_filter_timer)\n\t\t\n\t\tself.footer.set_object_buttons_enabled(self._query.main_class != \"*\")\n\t\tself.footer.set_del_object_enabled(False)\n\t\t\n\t\tself.tabs.currentChanged.connect(self.on_tab_changed)\n\t\t\n\t\tself.update_count()\n\t\n\tdef title(self):\n\t\t\n\t\treturn self._query.querystr\n\t\n\tdef icon(self):\n\t\t\n\t\treturn \"dep_cube.svg\"\n\t\n\tdef update_query(self, objects = None, classes = None):\n\t\t\n\t\tif ((objects is None) and (classes is None)) or \\\n\t\t\t(\"*\" in self._query.classes) or \\\n\t\t\t(self._query.main_class is None) or \\\n\t\t\tset(self._query.classes).intersection([\n\t\t\t\tcls if isinstance(cls, str) else cls.name for cls in classes\n\t\t\t]) or self._query.objects.intersection([\n\t\t\t\tobj if isinstance(obj, int) else obj.id for obj in objects\n\t\t\t]):\n\t\t\t\n\t\t\tcurrent_index = self.tabs.currentIndex()\n\t\t\tif current_index > 0:\n\t\t\t\tself.tabs.setCurrentIndex(0)\n\t\t\t\n\t\t\tself._query.process()\n\t\t\tself._cview.progress.stop()\n\t\t\t\n\t\t\tself.tab_table.update_query()\n\t\t\tself.relation_frame.populate()\n\t\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\t\tself.tab_images.update_query(\n\t\t\t\t\tself.tab_table.get_images(), self.tab_table.get_item_order()\n\t\t\t\t)\n\t\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\t\tself.tab_graph.update_query()\n\t\t\t\n\t\t\tif current_index > 0:\n\t\t\t\tself.tabs.setCurrentIndex(current_index)\n\t\n\tdef select_all(self):\n\t\t\n\t\tself.get_current_tab().selectAll()\n\t\n\tdef clear_selection(self):\n\t\t\n\t\tself.get_current_tab().clearSelection()\n\t\n\tdef populate_tab_images(self):\n\t\t\n\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\treturn\n\t\t\n\t\tself.tab_images = QueryTabImages(self, self.tab_table.get_images(), self.tab_table.get_item_order(), 
self._cmodel)\n\t\tself.tab_images.set_thumbnail_size(self.INITIAL_THUMBNAIL_SIZE)\n\t\tself.tabs.blockSignals(True)\n\t\tself.tabs.insertTab(1, self.tab_images, \"Images\")\n\t\tself.tabs.removeTab(2)\n\t\tself.tabs.setCurrentIndex(1)\n\t\tself.tabs.blockSignals(False)\n\t\n\tdef populate_tab_graph(self):\n\t\t\n\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\treturn\n\t\tself.tab_graph = QueryTabGraph(self, set([self._cmodel.get_object(obj_id) for obj_id in self.tab_table.get_obj_ids()]))\n\t\tself.tabs.blockSignals(True)\n\t\tself.tabs.insertTab(2, self.tab_graph, \"Graph\")\n\t\tself.tabs.removeTab(3)\n\t\tself.tabs.setCurrentIndex(2)\n\t\tself.tabs.blockSignals(False)\n\t\n\tdef get_current_tab(self):\n\t\t\n\t\treturn [self.tab_table, self.tab_images, self.tab_graph][self.tabs.currentIndex()]\n\t\n\tdef update_count(self):\n\t\t\n\t\tself.footer.set_count(self.get_current_tab().get_row_count())\n\t\n\tdef get_header(self, col, user_role = False):\n\t\t# pass to deposit.AbstractExternalsource to provide header data from QueryTabTable\n\t\t\n\t\treturn self.tab_table._table_model.headerData(col, QtCore.Qt.Horizontal, QtCore.Qt.UserRole if user_role else QtCore.Qt.DisplayRole)\n\t\n\tdef get_item(self, row, col):\n\t\t# pass to deposit.AbstractExternalsource to provide data from QueryTabTable\n\t\t\n\t\treturn self.tab_table._table_model.index(row, col).data(QtCore.Qt.UserRole)\n\t\n\tdef get_row_count(self):\n\t\t\n\t\treturn self.tab_table.get_row_count()\n\t\n\tdef get_column_count(self):\n\t\t\n\t\treturn self.tab_table.get_column_count()\n\t\n\t@QtCore.Slot(int)\n\tdef on_tab_changed(self, index):\n\t\t\n\t\tif index == 1:\n\t\t\tself.populate_tab_images()\n\t\telif index == 2:\n\t\t\tself.populate_tab_graph()\n\t\t\n\t\tself.update_count()\n\t\tself.get_current_tab().on_selected()\n\t\n\t@QtCore.Slot(int)\n\tdef on_zoom(self, value):\n\t\t\n\t\tself.tab_images.set_thumbnail_size(value)\n\t\n\t@QtCore.Slot()\n\tdef on_filter(self):\n\t\t\n\t\tself._filter_timer.start(1000)\n\t\n\t@QtCore.Slot()\n\tdef on_filter_timer(self):\n\t\t\n\t\tself.tab_table.apply_filter(self.footer.get_filter_text())\n\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\tself.tab_images.apply_filter(self.tab_table.get_item_order())\n\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\tself.tab_graph.apply_filter(set([self._cmodel.get_object(obj_id) for obj_id in self.tab_table.get_obj_ids()]))\n\t\tself.update_count()\n\t\n\t@QtCore.Slot()\n\tdef on_sorted(self):\n\t\t\n\t\tself.tab_images.sort(self.tab_table.get_item_order())\n\t\n\t@QtCore.Slot()\n\tdef on_add_object(self):\n\t\t\n\t\tself.signal_add_object.emit(self._query)\n\t\n\t@QtCore.Slot()\n\tdef on_del_object(self):\n\t\t\n\t\tself.signal_del_object.emit()\n\t\n\t@QtCore.Slot()\n\tdef on_del_descriptor(self):\n\t\t\n\t\tself.signal_del_descriptor.emit()\n\t\n\t@QtCore.Slot(int)\n\tdef on_to_object(self, obj_id):\n\t\t\n\t\tif obj_id is None:\n\t\t\treturn\n\t\tself.tabs.setCurrentIndex(0)\n\t\tself.tab_table.select_object(obj_id)\n\t\n\t@QtCore.Slot(int)\n\tdef on_object_link(self, obj_id):\n\t\t\n\t\tself.on_to_object(obj_id)\n\t\n\tdef on_query_activated(self, item):\n\t\t\n\t\tself.signal_query_activated.emit(item)\n\t\n\tdef on_query_selected(self, items):\n\t\t\n\t\tself.signal_query_selected.emit(items)\n\t\t\n\t\thas_descriptor = False\n\t\tfor item in items:\n\t\t\tif (item.obj_id is not None) and (item.value is not None):\n\t\t\t\thas_descriptor = True\n\t\t\t\tbreak\n\t\tfound = False\n\t\tfor item in items:\n\t\t\tif 
item.obj_id is not None:\n\t\t\t\tself.relation_frame.populate(self._cmodel.get_object(item.obj_id))\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif not found:\n\t\t\tself.relation_frame.populate()\n\t\t\n\t\tself.footer.set_del_descriptor_enabled(has_descriptor)\n\t\n\tdef on_object_activated(self, obj_id):\n\t\t\n\t\tself.on_to_object(obj_id)\n\t\n\tdef on_object_selected(self, object_ids):\n\t\t\n\t\tobjects = [self._cmodel.get_object(obj_id) for obj_id in object_ids]\n\t\tobj = None\n\t\tif len(objects) == 1:\n\t\t\tobj = objects[0]\n\t\tself.relation_frame.populate()\n\t\tself.signal_object_selected.emit(objects)\n\t\n\tdef on_relation_selected(self, relations):\n\t\t\n\t\tself.signal_relation_selected.emit([(self._cmodel.get_object(source), self._cmodel.get_object(target), label) for source, target, label in relations])\n\t\n\tdef on_selected_rows(self, row_items):\n\t\t\n\t\tself.footer.set_del_object_enabled(len(row_items) > 0)\n\t\n\tdef on_edited(self, item, value):\n\t\t\n\t\tself.signal_edited.emit(item, value)\n\t\n\tdef on_drop_url(self, item, url):\n\t\t\n\t\tself.signal_drop_url.emit(item, url)\n\t\n\tdef on_deactivate(self):\n\t\t\n\t\tself.tab_table.clearSelection()\n\t\tself.tab_images.clearSelection()\n\t\tself.tab_graph.deselect_all()\n\t\n\tdef on_close(self):\n\t\t\n\t\tself._filter_timer.stop()\n\t\tself.tab_table.on_close()\n\t\tself.tab_images.on_close()\n\t\tself.tab_graph.on_close()\n\nclass QueryFooter(QtWidgets.QFrame):\n\n\tdef __init__(self, queryframe):\n\n\t\tQtWidgets.QFrame.__init__(self)\n\t\t\n\t\tself._queryframe = queryframe\n\t\tself._count_text = None\n\t\t\n\t\tself.setFrameShape(QtWidgets.QFrame.StyledPanel)\n\t\tself.setFrameShadow(QtWidgets.QFrame.Raised)\n\n\t\tself.setLayout(QtWidgets.QGridLayout())\n\t\tself.layout().setContentsMargins(5, 0, 0, 0)\n\t\tself.layout().setSpacing(0)\n\t\t\n\n\t\tself.add_object_button = QtWidgets.QToolButton()\n\t\tself.add_object_button.setIcon(self._queryframe.get_icon(\"add_object.svg\"))\n\t\tself.add_object_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.add_object_button.setAutoRaise(True)\n\t\tself.add_object_button.setToolTip(\"Add Object\")\n\t\tself.add_object_button.setContentsMargins(0, 0, 0, 0)\n\t\tself.add_object_button.clicked.connect(self._queryframe.on_add_object)\n\t\tself.layout().addWidget(self.add_object_button, 0, 0)\n\t\t\n\t\tself.del_object_button = QtWidgets.QToolButton()\n\t\tself.del_object_button.setIcon(self._queryframe.get_icon(\"remove_object.svg\"))\n\t\tself.del_object_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.del_object_button.setAutoRaise(True)\n\t\tself.del_object_button.setToolTip(\"Remove Object\")\n\t\tself.del_object_button.setContentsMargins(0, 0, 0, 0)\n\t\tself.del_object_button.clicked.connect(self._queryframe.on_del_object)\n\t\tself.layout().addWidget(self.del_object_button, 0, 1)\n\t\t\n\t\tself.del_descriptor_button = QtWidgets.QToolButton()\n\t\tself.del_descriptor_button.setIcon(self._queryframe.get_icon(\"remove_descriptor.svg\"))\n\t\tself.del_descriptor_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.del_descriptor_button.setAutoRaise(True)\n\t\tself.del_descriptor_button.setToolTip(\"Remove Descriptor\")\n\t\tself.del_descriptor_button.setContentsMargins(0, 0, 5, 0)\n\t\tself.del_descriptor_button.clicked.connect(self._queryframe.on_del_descriptor)\n\t\tself.layout().addWidget(self.del_descriptor_button, 0, 2)\n\t\t\n\t\tfilter_label = QtWidgets.QLabel(\"Filter:\")\n\t\tfilter_label.setContentsMargins(5, 0, 0, 
0)\n\t\tself.layout().addWidget(filter_label, 0, 3)\n\n\t\tself._filter_edit = QtWidgets.QLineEdit()\n\t\tself._filter_edit.setContentsMargins(5, 0, 5, 0)\n\t\tself._filter_edit.textEdited.connect(self._queryframe.on_filter)\n\t\tself.layout().addWidget(self._filter_edit, 0, 4)\n\n\t\tself._count_label = QtWidgets.QLabel(\"Found: %s\")\n\t\tself._count_label.setContentsMargins(0, 0, 5, 0)\n\t\tself.layout().addWidget(self._count_label, 0, 5)\n\t\t\n\t\tself._count_text = self._count_label.text()\n\t\n\tdef get_filter_text(self):\n\t\t\n\t\treturn self._filter_edit.text()\n\t\n\tdef set_count(self, count):\n\n\t\tself._count_label.setText(self._count_text % (count))\n\t\n\tdef set_object_buttons_enabled(self, state):\n\n\t\tself.add_object_button.setVisible(state)\n\t\tself.del_object_button.setVisible(state)\n\t\n\tdef set_del_object_enabled(self, state):\n\t\t\n\t\tself.del_object_button.setEnabled(state)\n\t\n\tdef set_del_descriptor_enabled(self, state):\n\t\t\n\t\tself.del_descriptor_button.setEnabled(state)\n\n", "repo_name": "demjanp/deposit_gui", "sub_path": "src/deposit_gui/view/vmdiarea_frames/query_frame.py", "file_name": "query_frame.py", "file_ext": "py", "file_size_in_byte": 12273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "deposit_gui.view.vmdiarea_frames.abstract_mdiarea_frame.AbstractMDIAreaFrame", "line_number": 9, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 11, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 12, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 12, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 13, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 13, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 14, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 14, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 15, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 15, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 16, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 16, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 17, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 17, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 18, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 18, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 19, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 19, "usage_type": "name"}, {"api_name": "deposit_gui.view.vmdiarea_frames.abstract_mdiarea_frame.AbstractMDIAreaFrame.__init__", "line_number": 30, "usage_type": "call"}, {"api_name": "deposit_gui.view.vmdiarea_frames.abstract_mdiarea_frame.AbstractMDIAreaFrame", "line_number": 30, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame.__init__", "line_number": 31, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 31, "usage_type": "name"}, {"api_name": 
"PySide2.QtWidgets.QVBoxLayout", "line_number": 37, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 37, "usage_type": "name"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.relation_frame.RelationFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_table.QueryTabTable", "line_number": 49, "usage_type": "call"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images.QueryTabImagesLazy", "line_number": 50, "usage_type": "call"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph.QueryTabGraphLazy", "line_number": 51, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTabWidget", "line_number": 53, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 53, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QSplitter", "line_number": 58, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 58, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 58, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 58, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QSizePolicy", "line_number": 59, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 59, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 62, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 63, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 63, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QScrollArea", "line_number": 69, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 69, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 71, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 71, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 77, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 77, "usage_type": "name"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images.QueryTabImages", "line_number": 116, "usage_type": "argument"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph.QueryTabGraph", "line_number": 120, "usage_type": "argument"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images.QueryTabImages", "line_number": 136, "usage_type": "argument"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images.QueryTabImages", "line_number": 139, "usage_type": "call"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph.QueryTabGraph", "line_number": 149, "usage_type": "argument"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph.QueryTabGraph", "line_number": 151, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 169, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 169, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 174, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 184, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 184, "usage_type": "name"}, {"api_name": 
"PySide2.QtCore.Slot", "line_number": 195, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 195, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 200, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 200, "usage_type": "name"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images.QueryTabImages", "line_number": 209, "usage_type": "argument"}, {"api_name": "deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph.QueryTabGraph", "line_number": 211, "usage_type": "argument"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 205, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 205, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 215, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 215, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 220, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 220, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 225, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 225, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 230, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 230, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 235, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 235, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 243, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 243, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 314, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 314, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame.__init__", "line_number": 318, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 318, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 318, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 323, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 323, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 324, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 324, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QGridLayout", "line_number": 326, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 326, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QToolButton", "line_number": 331, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 331, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 333, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 333, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QToolButton", "line_number": 340, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 340, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 342, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 342, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QToolButton", "line_number": 349, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 349, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 351, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 351, "usage_type": "name"}, 
{"api_name": "PySide2.QtWidgets.QLabel", "line_number": 358, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 358, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLineEdit", "line_number": 362, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 362, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 367, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 367, "usage_type": "name"}]} +{"seq_id": "35369598465", "text": "import sys\nimport pandas as pd\nimport numpy as np\nimport json\nimport os\nfrom datetime import date\nfrom scipy.stats import linregress\nimport yaml\nfrom momentum_data import cfg\n\nDIR = os.path.dirname(os.path.realpath(__file__))\n\npd.set_option('display.max_rows', None)\npd.set_option('display.width', None)\npd.set_option('display.max_columns', None)\n\ntry:\n with open('config.yaml', 'r') as stream:\n config = yaml.safe_load(stream)\nexcept FileNotFoundError:\n config = None\nexcept yaml.YAMLError as exc:\n print(exc)\n\nPRICE_DATA = os.path.join(DIR, \"data\", \"price_history.json\")\nACCOUNT_VALUE = cfg(\"CASH\")\nRISK_FACTOR_CFG = cfg(\"RISK_FACTOR\")\nRISK_FACTOR = RISK_FACTOR_CFG or 0.002\nMAX_STOCKS = cfg(\"STOCKS_COUNT_OUTPUT\")\nSLOPE_DAYS = cfg(\"MOMENTUM_CALCULATION_PAST_DAYS\")\nPOS_COUNT_TARGET = cfg(\"POSITIONS_COUNT_TARGET\")\nMAX_GAP = cfg(\"EXCLUDE_MAX_GAP_PCT\")\nEXCLUDE_MA_CROSSES = cfg(\"EXCLUDE_ALL_MA_CROSSES\")\n\nTITLE_RANK = \"Rank\"\nTITLE_TICKER = \"Ticker\"\nTITLE_SECTOR = \"Sector\"\nTITLE_UNIVERSE = \"Universe\"\nTITLE_PERCENTILE = \"Percentile\"\nTITLE_MOMENTUM = \"Momentum (%)\"\nTITLE_RISK = \"ATR20d\"\nTITLE_PRICE = \"Price\"\nTITLE_SHARES = \"Shares\"\nTITLE_POS_SIZE = \"Position ($)\"\nTITLE_SUM = \"Sum ($)\"\n\nif not os.path.exists('output'):\n os.makedirs('output')\n\ndef read_json(json_file):\n with open(json_file, \"r\") as fp:\n return json.load(fp)\n\ndef momentum(closes):\n \"\"\"Calculates slope of exp. 
regression normalized by rsquared\"\"\"\n returns = np.log(closes)\n indices = np.arange(len(returns))\n slope, _, r, _, _ = linregress(indices, returns)\n # return ((1 + slope) ** 253) * (r**2)\n return (((np.exp(slope) ** 252) - 1) * 100) * (r**2)\n\ndef atr_20(candles):\n \"\"\"Calculates last 20d ATR\"\"\"\n daily_atrs = []\n for idx, candle in enumerate(candles):\n high = candle[\"high\"]\n low = candle[\"low\"]\n prev_close = 0\n if idx > 0:\n prev_close = candles[idx - 1][\"close\"]\n daily_atr = max(high-low, np.abs(high - prev_close), np.abs(low - prev_close))\n daily_atrs.append(daily_atr)\n return pd.Series(daily_atrs).rolling(20).mean().tail(1).item()\n\ndef calc_stocks_amount(account_value, risk_factor, risk_input):\n return (np.floor(account_value * risk_factor / risk_input)).astype(int)\n\ndef calc_pos_size(amount, price):\n return np.round(amount * price, 2)\n\ndef calc_sums(account_value, pos_size):\n sums = []\n sum = 0\n stocks_count = 0\n for position in list(pos_size):\n sum = sum + position\n sums.append(sum)\n if sum < account_value:\n stocks_count = stocks_count + 1\n return (sums, stocks_count)\n\ndef positions():\n \"\"\"Returns a dataframe doubly sorted by momentum factor, with atr and position size\"\"\"\n json = read_json(PRICE_DATA)\n momentums = {}\n ranks = []\n for ticker in json:\n try:\n closes = list(map(lambda candle: candle[\"close\"], json[ticker][\"candles\"]))\n if closes and len(closes) >= 250:\n closes_series = pd.Series(closes)\n slope_series = closes_series.tail(SLOPE_DAYS[0])\n mas = closes_series.rolling(100).mean().tail(SLOPE_DAYS[0])\n ma_is_crossed = False\n if (EXCLUDE_MA_CROSSES):\n ma_crosses = slope_series < mas\n ma_crosses = ma_crosses.where(ma_crosses == True).dropna()\n ma_is_crossed = ma_crosses.size > 0\n # calculate gaps of the last 90 days\n diffs = np.abs(slope_series.pct_change().diff()).dropna()\n gaps = diffs[diffs > MAX_GAP / 100.0]\n ma = mas.tail(1).item()\n if ma > closes[-1] or ma_is_crossed:\n print(\"%s was below it's 100d moving average.\" % ticker)\n elif len(gaps):\n print(f'{ticker} has a gap > {MAX_GAP}%')\n else:\n ranks.append(len(ranks)+1)\n for idx, slope_days in enumerate(SLOPE_DAYS):\n if not slope_days in momentums:\n momentums[slope_days] = []\n mmntm = momentum(pd.Series(closes[-slope_days:]))\n momentums[slope_days].append((0, ticker, json[ticker][\"sector\"], json[ticker][\"universe\"], mmntm, atr_20(json[ticker][\"candles\"]), closes[-1]))\n except KeyError:\n print(f'Ticker {ticker} has corrupted data.')\n slope_std = SLOPE_DAYS[0]\n dfs = []\n for slope_days in SLOPE_DAYS:\n slope_suffix = f'_{slope_days}' if slope_days != slope_std else ''\n df = pd.DataFrame(momentums[slope_days], columns=[TITLE_RANK, TITLE_TICKER, TITLE_SECTOR, TITLE_UNIVERSE, TITLE_MOMENTUM, TITLE_RISK, TITLE_PRICE])\n df = df.sort_values(([TITLE_MOMENTUM]), ascending=False)\n df[TITLE_RANK] = ranks\n # df[TITLE_PERCENTILE] = pd.qcut(df[TITLE_MOMENTUM], 100, labels=False)\n df = df.head(MAX_STOCKS)\n risk_factor = RISK_FACTOR\n calc_runs = 2\n for run in range(1,calc_runs+1,1):\n # recalculate for positions target\n if run > 1 and not RISK_FACTOR_CFG and POS_COUNT_TARGET and (stocks_count < POS_COUNT_TARGET or stocks_count - POS_COUNT_TARGET > 1):\n risk_factor = RISK_FACTOR * (stocks_count / POS_COUNT_TARGET)\n df[TITLE_SHARES] = calc_stocks_amount(ACCOUNT_VALUE, risk_factor, df[TITLE_RISK])\n df[TITLE_POS_SIZE] = calc_pos_size(df[TITLE_SHARES], df[TITLE_PRICE])\n (sums, stocks_count) = calc_sums(ACCOUNT_VALUE, 
df[TITLE_POS_SIZE])\n df[TITLE_SUM] = sums\n\n df.to_csv(os.path.join(DIR, \"output\", f'mmtm_posis{slope_suffix}.csv'), index = False)\n\n watchlist = open(os.path.join(DIR, \"output\", f'Momentum{slope_suffix}.txt'), \"w\")\n first_10_pf = \"\"\n tv_ticker_count = 0\n for index, row in df.iterrows():\n plus_sign = \"\" if tv_ticker_count == 0 else \"+\"\n # TradingView only supports combining 10 Tickers :(((\n if row[TITLE_POS_SIZE] > 0 and row[TITLE_SUM] <= ACCOUNT_VALUE and tv_ticker_count < 10:\n tv_ticker_count = tv_ticker_count + 1\n first_10_pf = f'{first_10_pf}{plus_sign}{row[TITLE_SHARES]}*{row[TITLE_TICKER]}'\n # first_10_combined = f'{first_10_combined})/{tv_ticker_count}'\n watchlist_stocks = ','.join(df.head(MAX_STOCKS)[TITLE_TICKER])\n watchlist.write(f'{first_10_pf},{watchlist_stocks}')\n watchlist.close()\n\n dfs.append(df)\n\n return dfs\n\n\ndef main():\n posis = positions()\n print(posis[0])\n print(\"***\\nYour 'mmtm_posis.csv' is in the output folder.\\n***\")\n if cfg(\"EXIT_WAIT_FOR_ENTER\"):\n input(\"Press Enter key to exit...\")\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "skyte/momentum", "sub_path": "momentum_posis.py", "file_name": "momentum_posis.py", "file_ext": "py", "file_size_in_byte": 6823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 15, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 19, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "momentum_data.cfg", "line_number": 26, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 27, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 29, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 30, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 31, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 32, "usage_type": "call"}, {"api_name": "momentum_data.cfg", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 101, 
"usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "momentum_data.cfg", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "23572971991", "text": "import sys\nassert sys.version_info >= (3, 5)\nfrom collections import Counter\n\n\ndef solve(prefix):\n N, K = [int(_) for _ in input().split()]\n lv = ((N, 1),)\n k = 1\n while K > k:\n assert sum(t[1] for t in lv) == k\n assert 1 <= len(lv) <= 2\n assert len(lv) == 1 or lv[0][0] == lv[1][0]+1\n K -= k\n k <<= 1\n nlv = Counter()\n for nn, kk in lv:\n nlv[nn>>1] += kk\n nlv[((nn+1)>>1)-1] += kk\n lv = tuple(sorted(nlv.items(), key=lambda t: t[0], reverse=True))\n n = lv[0][0] if K <= lv[0][1] else lv[1][0]\n print('{}{} {}'.format(prefix, n>>1, ((n+1)>>1)-1))\n\n\ndef main():\n T = int(input())\n for t in range(T):\n solve(prefix='Case #{}: '.format(t+1))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_201/635.py", "file_name": "635.py", "file_ext": "py", "file_size_in_byte": 788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.version_info", "line_number": 2, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "24691258733", "text": "# -*- coding:utf-8 -*-\nimport socket\nimport pyaudio\nimport Dynaknock\n\nCHUNK = 512\nRATE = 44100\n\n\ndef create_server_socket(port):\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_sock.bind(('', port))\n server_sock.listen(256)\n print('Server Run Port:{}'.format(port))\n return server_sock\n\n\ndef accept_loop(server_sock):\n print('Ready For Accept')\n new_sock, (remote_host, remote_remport) = server_sock.accept()\n return new_sock\n\n\ndef create_audio_stream(chunk, rate):\n p = pyaudio.PyAudio()\n stream = p.open(\n format=pyaudio.paInt16,\n channels=1,\n rate=rate,\n input=True,\n output=False,\n frames_per_buffer=chunk\n )\n return stream\n\n\nif __name__ == \"__main__\":\n\n # Launch Socket Server\n server_sock = create_server_socket(7777)\n\n # waiting connection from client\n sock = accept_loop(server_sock)\n\n # create audio stream\n stream = create_audio_stream(CHUNK, RATE)\n\n # start detection\n analyzer = Dynaknock.Analyzer(stream, sock)\n analyzer.start_detection()", "repo_name": "Hikaru-Ito/DynaKnock", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 12, 
"usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 26, "usage_type": "call"}, {"api_name": "pyaudio.paInt16", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Dynaknock.Analyzer", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "3482749906", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\n# choices are used in the form, but not enforced at the model level\nEVENT_TYPES = [(v,v) for v in (\n ('Aerial Bombardment'),\n ('Artillery Bombardment'),\n ('Attack: IED'),\n ('Attack: Mine'),\n ('Attack: Small Arms'),\n ('Attack: Mechanized'),\n ('Attack on Vehicles'),\n ('Armed Incursion'),\n ('Troop Movement'),\n ('Civilian Displacement'),\n)]\n\nEVIDENCE_SOURCES = [(v,v) for v in (\n ('Confidential Source'),\n ('Satellite Image'),\n ('NGO Report'),\n ('UN Report'),\n ('Media Report'),\n ('Government Report'),\n ('HSBA - Small Arms Survey'),\n ('SVM (Sudan Vote Monitor)'),\n)]\n\nACTORS = [(v,v) for v in (\n ('SAF (Sudan Armed Forces)'),\n ('NGO (Non-Governmental Organization)'),\n ('SPLA/GOSS (Sudan\\'s People Liberation Army/Government of South Sudan)'),\n ('UNMIS (United Nations Mission In Sudan)'),\n ('Militia - Specify militia'),\n ('PDF (Popular Defense Force)'),\n)]\n\nclass Event(models.Model):\n summary = models.CharField(max_length=255, verbose_name=\"Summary\", help_text=\"Short summary of event (one phrase or sentence)\")\n type = models.CharField(max_length=255, choices=EVENT_TYPES, verbose_name=\"Event Type\")\n date = models.DateField(verbose_name=\"Event Date\", help_text=\"Date on which the event occurred\")\n location = models.CharField(max_length=255, verbose_name=\"Location\", help_text=\"Where the event occurred\")\n lat = models.FloatField(blank=True, verbose_name=\"Latitude\")\n lon = models.FloatField(blank=True, verbose_name=\"Longitude\")\n actor = models.CharField(max_length=255, choices=ACTORS, help_text=\"The group or persons responsible for the event\")\n population = models.CharField(max_length=255, verbose_name=\"Affected Population\", \n help_text='Specify \"Internally Displaced Persons\", \"Residents of [Town]\", a particular military unit, etc')\n notes = models.TextField(blank=True, verbose_name=\"Notes\")\n logger = models.ForeignKey(User)\n \n class Meta:\n ordering = ['-date']\n \n def __unicode__(self):\n return self.name\n \n def get_absolute_url(self):\n return reverse('event_view', kwargs={\"id\": self.id})\n\nclass Evidence(models.Model):\n summary = models.CharField(max_length=255, verbose_name=\"Summary\", help_text=\"Short summary of evidence (one phrase or sentence)\")\n event = models.ForeignKey(Event, verbose_name=\"Related Event\", help_text=\"The event to which this evidence is related, if known\")\n source = models.CharField(max_length=255, choices=EVIDENCE_SOURCES, verbose_name=\"Source\")\n confidential_id = models.CharField(max_length=255, verbose_name=\"Confidential Source ID\")\n confidential_link = models.CharField(max_length=255, verbose_name=\"Confidential Source Link/File Number\")\n source_link = models.CharField(max_length=255, verbose_name=\"Source Link\", help_text=\"URL of source report or image\")\n # do we need a file upload option here?\n notes = models.TextField(blank=True, verbose_name=\"Notes\")\n logger = models.ForeignKey(User)\n ", "repo_name": "nrabinowitz/ssp_tracker", "sub_path": "tracker/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3146, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.Model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 70, 
"usage_type": "name"}]} +{"seq_id": "16300546167", "text": "\n# coding: utf-8\n\n# In[1]:\n\n# Since the goal of the push is not specified in the test,\n# I assume that Freshr push notifications aim at engaging\n# users who are not currently active in the app\n# maybe through recommendations\n\n# Here I am getting for each user the day(s)\n# of the week when they are not active in the app\n# I also get for each of them the time slots when they are the most active \n# because push notifications must be sent at appropriate time,\n# when they are less likely to interrupt the user\n\n# In[2]:\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport sys\n\n# In[3]:\n\n# Hourly time slots in a day\ntime_slots = [(i,i+1) for i in range(24)]\nweekdays = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}\n\n# Convert milliseconds timestamp to string formatted datetime\ndef ms_to_datetime(timestamp):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp/1000.0))\n\n# Convert string formatted datetime to datetime object\ndef str_to_datetime(string):\n return datetime.strptime(string, '%Y-%m-%d %H:%M:%S')\n\n# Get the time slot from a string formatted datetime\ndef get_time_slot(string):\n hour = str_to_datetime(string).hour\n return time_slots[hour]\n\n# Get the weekday from a string formatted datetime\ndef get_weekday(string):\n dt_obj = str_to_datetime(string)\n return dt_obj.weekday()\n\n# Read CSV dataset into a dataframe, clean NaN and duplicates\n# and add columns that convert milliseconds timestamps to string formatted datetime\ndef csv_to_clean_df(dataset_path):\n df = pd.read_csv(dataset_path)\n df.dropna(inplace=True)\n df.drop_duplicates(inplace=True)\n ms_cols = ['watermark', 'timestamp']\n df[ms_cols] = df[ms_cols].applymap(ms_to_datetime)\n df['time_slot'] = df['timestamp'].apply(get_time_slot)\n df['weekday'] = df['timestamp'].apply(get_weekday)\n #df.head()\n return df\n\n# For each user get the days when he is inactive in the app\ndef get_inactive_weekdays(per_user_active_weekdays):\n weekdays_index = [key for key in weekdays]\n per_user_active_weekdays['inactive_weekdays'] = per_user_active_weekdays['active_weekdays']\n for i, user_id in enumerate(per_user_active_weekdays['user_id']):\n per_user_active_weekdays.set_value(i, 'inactive_weekdays',\n np.setdiff1d(weekdays_index,\n per_user_active_weekdays.ix[i, 'active_weekdays']).tolist())\n return per_user_active_weekdays\n\n# In[4]:\n\ndef main(argv):\n start = datetime.now()\n try:\n df = csv_to_clean_df(argv[0])\n except:\n print('File path argument error')\n sys.exit(1)\n # Get number of times each user opened the conversation for each 1h-window/slot in a day\n per_user_time_slot_count = df.groupby(['user_id', 'time_slot']).time_slot.count().reset_index(name=\"count\")\n # Get number of times each user opened the conversation for each weekday\n per_user_weekday_count = df.groupby(['user_id', 'weekday']).weekday.count().reset_index(name=\"count\")\n # Get the weekdays when the user is active in the app\n per_user_active_weekdays = per_user_weekday_count.groupby(['user_id'])['weekday']\\\n .apply(lambda x: list(x))\\\n .reset_index(name='active_weekdays')\n # Get also the weekdays when the user is not active in the app\n per_user_active_weekdays = get_inactive_weekdays(per_user_active_weekdays)\n #per_user_active_weekdays.head()\n\n # Get for each user the time slots in a day when he uses the most the application\n 
per_user_time_slots_max_count = per_user_time_slot_count.groupby(['user_id'])['count'].max().reset_index()\n per_user_most_active_time_slots = pd.merge(per_user_time_slot_count,\n per_user_time_slots_max_count,\n on=['user_id', 'count'])\n per_user_most_active_time_slots = per_user_most_active_time_slots.groupby(['user_id'])['time_slot']\\\n .apply(lambda x: list(x)) \\\n .reset_index(name=\"time_slots\")\n\n # Merge the results, make them readable and write the final dataframe to a CSV file\n per_user_best_time = pd.merge(per_user_active_weekdays,\n per_user_most_active_time_slots,\n on='user_id').drop('active_weekdays', axis=1)\n per_user_best_time['inactive_weekdays'] = per_user_best_time['inactive_weekdays'].apply(lambda x: [weekdays[idx] for idx in x])\n per_user_best_time['time_slots'] = per_user_best_time['time_slots'].apply(lambda slots: [\"between {0}h and {1}h\".format(slot[0], slot[1]) for slot in slots])\n per_user_best_time.to_csv(argv[1])\n print(datetime.now() - start)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print('Too few arguments')\n sys.exit(1)\n elif len(sys.argv) > 3:\n print('Too many arguments')\n sys.exit(1)\n main(sys.argv[1:])\n\n\n", "repo_name": "Rigzzzz/Freshr", "sub_path": "freshr_predict_push_time.py", "file_name": "freshr_predict_push_time.py", "file_ext": "py", "file_size_in_byte": 5213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.strftime", "line_number": 33, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.setdiff1d", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 115, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}]} +{"seq_id": "27422552922", "text": "from requests_oauthlib import OAuth2Session\n\nfrom flask import Flask, request, redirect, session, url_for\nfrom flask.json import jsonify\n\n# This information is obtained upon registration of a new GitHub\nclient_id = \"0oaps3etzD5o6oIBE5d6\"\nclient_secret = \"TB7kYfe86zZl294mhE8UXIO4ofV5gqEkvZBs3Net\"\nauthorization_base_url = 'https://dev-9590480.okta.com/oauth2/v1/authorize'\ntoken_url = 'https://dev-9590480.okta.com/oauth2/v1/token'\n\napp = Flask(__name__)\ng_state = \"\"\n\n@app.route(\"/login\")\ndef login():\n keycloak = OAuth2Session(client_id)\n authorization_url, state = keycloak.authorization_url(authorization_base_url)\n print(f\"authorization_url: {authorization_url}\")\n 
print(f\"STATE: {state}\")\n \n # State is used to prevent CSRF, keep this for later.\n session['oauth_state'] = state\n g_state = state\n return redirect(authorization_url)\n\n@app.route(\"/callback\")\ndef callback():\n # keycloak = OAuth2Session(client_id, state=session['oauth_state'])\n keycloak = OAuth2Session(client_id)\n token = keycloak.fetch_token(token_url, client_secret=client_secret,\n authorization_response=request.url)\n\n print(f\"TOKEN: {token}\")\n res = jsonify(keycloak.get('https://dev-9590480.okta.com/oauth2/v1/userinfo').json())\n print(f\"USER : {res}\")\n return res\n\n@app.route(\"/test\")\ndef test():\n return \"test result\"\n\nif __name__ == '__main__':\n app.run(port=5000,debug=True)\n", "repo_name": "shawnhankim/cori-nginx", "sub_path": "auth/oauth2.0/flask/01-flask-example.py", "file_name": "01-flask-example.py", "file_ext": "py", "file_size_in_byte": 1439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "18313809416", "text": "# This program - \n# 1. Sends a post request to access dynamic token\n# 2. Use the dynamic (bearer) token to post data \n# Note: my backend server expects a bearer token for auth and a base64 encoded data for processing.\n# 3. 
Print the response received from the post request \n# Note: upon successful authentication, my backend server returns another base64 encoded data)\n\n\nfrom os import system\nimport requests\nimport json\n\nbase64EncodedInput = 'SUkqALItAAD / O6ImgZU7sDwzM6wq//pimOOKQkThU07WGF7TT7wqmHWGAAAFABULgAAGwEFAAEAAA3wwe/iHYnE7W7HFIRexUUZhFYpEDAAEAAAACAAAAAAAAAAIAA='\n\n\n# construct header and payload with clientid, client_secret.\ndef get_headers_for_token_request():\n    token_url = 'https://api-internal.pod.myplaygrounddomain.net/apip/auth/v2/token'\n    token_header = {'Accept': 'application/json','Content-Type': 'application/json',}\n    token_payload = {'client_id': 'oo1nclud3ooyourooclientidoohereoo','client_secret': 'oo1nclud3ooyouroocl13ntooS3cr3toohereoo','grant_type': 'client_credentials',}\n    return token_url, token_header, token_payload\n\n\n# construct header and payload with input data for post request\ndef get_headers_for_post(token):\n    post_url = \"https://api-internal.pod.myplaygrounddomain.net/v1/p2e/api/invocations\"\n    post_headers = {'Accept': 'application/json','Content-type': 'application/json','Authorization': 'Bearer ' + token,}\n    post_payload ={'name' : 'what-ever-applicable', 'role' : 'creator', 'public_key' : ''+ base64EncodedInput,}##pay\n    return post_url, post_headers, post_payload\n\n\n# a post method - returns a response (token_url is designed to return a bearer token in my environment)\ndef get_token(token_Url, token_header, token_payload):\n    token_response = requests.post(token_Url, headers=token_header, json=token_payload)\n    return token_response\n\n# another post method - returns a response (post_url is designed to return an encoded data)\ndef post_and_get_response(post_Url, post_headers, post_payload ):\n    response = requests.post(post_Url, headers=post_headers, json=post_payload)\n    return response\n\n\n\nif __name__ == \"__main__\":\n    system('cls')\n\n    token_url, token_header, token_payload = get_headers_for_token_request()\n    resp_token = get_token(token_url, token_header, token_payload).json()\n    #print (resp_token)\n    token = resp_token[\"access_token\"]##ignore all content except the access_token\n    #print (token)\n\n    post_Url, post_headers, post_payload = get_headers_for_post(token)\n    resp = post_and_get_response(post_Url, post_headers, post_payload).json()\n    print (resp)\n", "repo_name": "0x218/Python", "sub_path": "http/post_with_dynamic_token.py", "file_name": "post_with_dynamic_token.py", "file_ext": "py", "file_size_in_byte": 2570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 39, "usage_type": "call"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "28771001505", "text": "import mock\n\nclass IutilTest(mock.TestCase):\n    def setUp(self):\n        self.setupModules(\n            ['_isys', 'logging', 'pyanaconda.anaconda_log', 'block'])\n\n        import pyanaconda\n        pyanaconda.anaconda_log = mock.Mock()\n\n    def tearDown(self):\n        self.tearDownModules()\n\n    def copy_to_sysimage_test(self):\n        from pyanaconda import iutil\n        fs = mock.DiskIO()\n        self.take_over_io(fs, iutil)\n        self.assertEqual(iutil.copy_to_sysimage(\"/etc/securetty\"), False)\n\n        fs[\"/etc/securetty\"] = \"tty1\"\n        iutil.os.makedirs = mock.Mock()\n        iutil.shutil.copy = mock.Mock()\n        self.assertEqual(iutil.copy_to_sysimage(\"/etc/securetty\"), True)\n        iutil.os.makedirs.assert_called_with(\"/mnt/sysimage/etc\")\n        
iutil.shutil.copy.assert_called_with(\"/etc/securetty\",\n \"/mnt/sysimage/etc/securetty\")\n", "repo_name": "mattias-ohlsson/anaconda", "sub_path": "tests/pyanaconda_test/iutil_test.py", "file_name": "iutil_test.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "mock.TestCase", "line_number": 3, "usage_type": "attribute"}, {"api_name": "pyanaconda.anaconda_log", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 9, "usage_type": "call"}, {"api_name": "mock.DiskIO", "line_number": 16, "usage_type": "call"}, {"api_name": "pyanaconda.iutil", "line_number": 17, "usage_type": "name"}, {"api_name": "pyanaconda.iutil.copy_to_sysimage", "line_number": 18, "usage_type": "call"}, {"api_name": "pyanaconda.iutil", "line_number": 18, "usage_type": "name"}, {"api_name": "pyanaconda.iutil.os", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pyanaconda.iutil", "line_number": 21, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 21, "usage_type": "call"}, {"api_name": "pyanaconda.iutil.shutil", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyanaconda.iutil", "line_number": 22, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 22, "usage_type": "call"}, {"api_name": "pyanaconda.iutil.copy_to_sysimage", "line_number": 23, "usage_type": "call"}, {"api_name": "pyanaconda.iutil", "line_number": 23, "usage_type": "name"}, {"api_name": "pyanaconda.iutil.os.makedirs.assert_called_with", "line_number": 24, "usage_type": "call"}, {"api_name": "pyanaconda.iutil.os", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pyanaconda.iutil", "line_number": 24, "usage_type": "name"}, {"api_name": "pyanaconda.iutil.shutil.copy.assert_called_with", "line_number": 25, "usage_type": "call"}, {"api_name": "pyanaconda.iutil.shutil", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pyanaconda.iutil", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "74822392193", "text": "\nimport json\nimport platform\nfrom time import sleep\n# os.system()\nimport os\nimport sys\n\nfrom build_sys.data_str import usage_text\nfrom build_sys.tools import test, sanitize, debug, benchmark, run, clean\nfrom build_sys.build import build\nfrom build_sys.change_name import change_proj_name\nfrom build_sys.bootstrap import bootstrap\nfrom build_sys.prod import prod\nfrom build_sys.docker import docker_build\nname=\"\"\ndef main():\n\n usage = usage_text()\n cof = open('.config/data.json')\n data = json.loads(cof.read())\n if len(sys.argv) == 1:\n print(usage)\n else:\n if str(sys.argv[1]) == \"build\":\n build(data)\n elif str(sys.argv[1]) == \"run\":\n run(data)\n elif str(sys.argv[1]) == \"bootstrap\":\n bootstrap(data)\n elif str(sys.argv[1]) == \"dev\":\n build(data)\n run(data)\n elif str(sys.argv[1]) == \"clean\":\n clean(data)\n elif str(sys.argv[1]) == \"rename\" and len(sys.argv) == 3:\n change_proj_name(data)\n\n elif str(sys.argv[1]) == \"debug\":\n debug(data)\n elif str(sys.argv[1]) == \"test\":\n test(data)\n elif str(sys.argv[1]) == \"sanitize\":\n sanitize(data)\n elif str(sys.argv[1]) == \"benchmark\":\n benchmark(data)\n elif str(sys.argv[1]) == \"prod\":\n prod(data)\n elif str(sys.argv[1]) == \"docker\":\n if len(sys.argv) == 2:\n print(\"docker\")\n elif len(sys.argv) == 3 and sys.argv[2] ==\"build\":\n docker_build(data)\n \n else:\n print(\"invalid arguments\")\n\n\n\nif __name__ == 
\"__main__\":\n main()", "repo_name": "danikhan632/create-cpp-app", "sub_path": "scripts.py", "file_name": "scripts.py", "file_ext": "py", "file_size_in_byte": 1684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "build_sys.data_str.usage_text", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "build_sys.build.build", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "build_sys.tools.run", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "build_sys.bootstrap.bootstrap", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "build_sys.build.build", "line_number": 32, "usage_type": "call"}, {"api_name": "build_sys.tools.run", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "build_sys.tools.clean", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "build_sys.change_name.change_proj_name", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "build_sys.tools.debug", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "build_sys.tools.test", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "build_sys.tools.sanitize", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}, {"api_name": "build_sys.tools.benchmark", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "build_sys.prod.prod", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "build_sys.docker.docker_build", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "4525189528", "text": "from django.urls import path\r\n\r\nfrom . import views\r\n\r\n# to add app_name= \" something \" then add the namespace=\" something \" in url of the app in root directory urls.py page\r\napp_name = 'polls'\r\nurlpatterns = [\r\n # :8000/list/ not working gives error 404 not found.\r\n # works with :8000/polls/list/\r\n path('list/', views.polls_list, name='list'),\r\n\r\n # to add new poll in polls list.\r\n path('new/', views.new_poll, name='new'),\r\n\r\n # to edit the existing poll in polls list. 
(polls/edit/1/)\r\n    path('edit/<int:poll_id>', views.edit_poll, name='edit_poll'),\r\n\r\n    # to delete poll\r\n    path('delete/poll/<int:poll_id>', views.delete_poll, name='delete_poll'),\r\n\r\n    # to add new choice\r\n    path('edit/<int:poll_id>/choice/add/', views.add_choice, name='add_choice'),\r\n\r\n    # to edit choice\r\n    path('edit/choice/<int:choice_id>/', views.edit_choice, name='edit_choice'),\r\n\r\n    # to delete a choice\r\n    path('delete/choice/<int:choice_id>/', views.delete_choice, name='delete_choice'),\r\n\r\n    # for polls/details/1/ note:it will give details about polls question\r\n    path('details/<int:poll_id>/', views.poll_detail, name='detail'),\r\n\r\n    # for form action after voting\r\n    # polls/details/1/vote/\r\n    path('details/<int:poll_id>/vote/', views.poll_vote, name='vote'),\r\n\r\n]\r\n", "repo_name": "hedagaurav/PollMe", "sub_path": "polls/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "12948321908", "text": "from urlparse import urlparse\nimport httplib, sys, multiprocessing\nimport os\nimport signal\nimport json\nimport time\nfrom datetime import datetime\nimport logging\nimport logging.config\nfrom monascaclient import client\nfrom monascaclient import ksclient\n\nkeystone = {\n    'username': 'mini-mon',\n    'password': 'password',\n    'project': 'test',\n    'auth_url': 'http://192.168.10.5:35357/v3'\n}\n\n# Run as \"python agent_simulator.py\"\n\nRNDD_KAFKA0002 = 'http://127.0.0.1:8080/v2.0/metrics'\nMINI_MON = 'http://192.168.10.4:8080/v2.0/'\n\n# select which API URL to use\napi_url = MINI_MON\n\n# num_process x num_requests will be the number of http connections. \n# beware that 20,000 connections will cause too many ephemeral ports used\n# on a single api server (with one ipaddress). Would recommend not greater than 1000\nnum_processes = 1\n\n# number of requests sent per interval (normally 1-20 max if doing continuous)\nnum_requests = 2\n\n# the agent sends anywhere between 40-360 metrics per request\nnum_metrics_per_request = 100\n\n# (for continuous) The seconds to wait to send metrics. 
valid range 1-60 (lowest recommended is 10 by the agent)\nagent_interval = 60\n\n# when False runs once, when True runs continuously sending num_requests every interval.\ncontinuous = False\n\nlog = logging.getLogger(__name__)\n\n\nprocessors = [] # global list to facilitate clean signal handling\nexiting = False\n\nclass MetricCreatorSimple():\n \"\"\" Generates metrics\n \"\"\"\n def __init__(self, proc_num):\n self.proc_num = proc_num\n self.num_calls = 0\n self.start_time = int((time.time() - 120)*1000)\n\n def create_metric(self):\n metric = {\"name\": \"cube\" + str(self.proc_num),\n \"dimensions\": {\"hostname\": \"server-\" + str(self.proc_num)},\n \"timestamp\": self.start_time+self.num_calls,\n \"value\": self.num_calls}\n self.num_calls += 1\n return metric\n\nclass agent_sim_process():\n \"\"\"Simulate a monasca agent\n arguments\n proc_num - identifying number for the agent\n num_requests - how many requests the agent makes per interval\n num_metrics - how many metrics are in each request\n continuous - run once or forever\n queue - (multiprocessing.Queue) if provided, agent will use to report number of metrics sent\n metric_creator - agent will call \"create_metric\" method from this object and will pass in proc_num\n token - what token should the agent use, will generate its own token if none provided\n\n The process will report the number of metrics for each batch request to the q, it will also send exceptions\n it encounters. If no queue is provided, it will print these instead.\n \"\"\"\n def __init__(self, proc_num, num_requests, num_metrics, api_url, keystone_dict, continuous=False, interval=60, queue=None,\n metric_creator=MetricCreatorSimple, token=None):\n self.proc_num = proc_num\n self.num_requests = num_requests\n self.num_metrics = num_metrics\n self.interval = interval\n self.continuous = continuous\n self.queue = queue\n if not token:\n try:\n token = ksclient.KSClient(**keystone_dict).token\n except Exception as ex:\n print(\"Agent {}: Failed to get auth token from keystone\\n{}\".format(self.proc_num, keystone_dict))\n #print(\"Using token: \" + token)\n self.mon_client = client.Client('2_0', api_url, session=token)\n self.metric_creator = metric_creator(proc_num)\n #print(\"Created agent {}\".format(self.proc_num))\n\n def do_work_continuously(self):\n while True:\n start_send = time.time()\n for x in xrange(self.num_requests):\n self.post_metrics()\n end_send = time.time()\n\n secs = end_send - start_send\n if secs < self.interval:\n sleep_interval = self.interval - secs\n else:\n sleep_interval = 0\n #print (\"send seconds %f took longer than interval %f, not sleeping\" % (secs, self.interval))\n #print (\"send time = %f, sleep time = %f\" % (secs, sleep_interval) )\n time.sleep(sleep_interval)\n \n def do_work_once(self):\n start_send = time.time()\n for x in xrange(self.num_requests):\n self.post_metrics()\n end_send = time.time()\n secs = end_send - start_send\n #print (\"send time in seconds = %f\" % (secs))\n\n def post_metrics(self):\n try:\n body = []\n for i in xrange(self.num_metrics):\n body.append(self.metric_creator.create_metric())\n self.mon_client.metrics.create(jsonbody=body)\n if self.queue:\n self.queue.put(self.num_metrics)\n except Exception as ex:\n if self.queue:\n self.queue.put(ex)\n else:\n print(ex)\n\n def run(self):\n if self.continuous:\n self.do_work_continuously()\n else:\n self.do_work_once()\n\n\ndef clean_exit(signum, frame=None):\n \"\"\"\n Exit all processes attempting to finish uncommited active work before exit.\n Can 
be called on an os signal or on zookeeper losing connection.\n    \"\"\"\n    global exiting\n    if exiting:\n        # Since this is set up as a handler for SIGCHLD when this kills one child it gets another signal, the global\n        # exiting avoids this running multiple times.\n        log.debug('Exit in progress clean_exit received additional signal %s' % signum)\n        return\n\n    log.info('Received signal %s, beginning graceful shutdown.' % signum)\n    exiting = True\n\n    for process in processors:\n        try:\n            if process.is_alive():\n                process.terminate()\n        except Exception:\n            pass\n\n    # Kill everything that didn't already die\n    for child in multiprocessing.active_children():\n        log.debug('Killing pid %s' % child.pid)\n        try:\n            os.kill(child.pid, signal.SIGKILL)\n        except Exception:\n            pass\n\n    sys.exit(0)\n\n\nif __name__ == '__main__':\n    print (\"continuous = %d\") % continuous\n    print (\"using URL: %s\") % api_url\n    print (\"num_processes = %d\" % num_processes)\n    print (\"num_metrics_per_request = %d\" % num_metrics_per_request)\n    print (\"num requests (sent per interval if continuous) = %d\") % num_requests\n    print (\"interval (secs) = %d\" % agent_interval)\n    print (\"total metrics sent (per interval) = %d\" % (num_processes * num_requests * num_metrics_per_request))\n    print (\"total connections (per interval) = %d\" % (num_processes * num_requests))\n\n    log.info('num_processes %d', num_processes)\n    for x in xrange(0, num_processes): \n        p = multiprocessing.Process(\n            target=agent_sim_process(x, num_requests, num_metrics_per_request, api_url, keystone,\n                                     continuous, agent_interval).run\n        )\n        processors.append(p)\n\n    ## Start\n    try:\n        log.info('Starting processes')\n        print ('Starting processes %s' % str(datetime.now()))\n        start = time.time()\n        for process in processors:\n            process.start()\n\n        # The signal handlers must be added after the processes start otherwise they run on all processes\n        signal.signal(signal.SIGCHLD, clean_exit)\n        signal.signal(signal.SIGINT, clean_exit)\n        signal.signal(signal.SIGTERM, clean_exit)\n\n        log.info('calling Process.join() ')\n        for process in processors:\n            process.join()\n        end = time.time()\n        print (\"runtime = %d seconds\" % (end - start))\n    except Exception:\n        print ('Error! 
Exiting.')\n for process in processors:\n process.terminate()\n end = time.time()\n print (\"runtime = %d seconds\" % (end - start))\n", "repo_name": "monasca/monasca-perf", "sub_path": "monasca_perf/agent_sim.py", "file_name": "agent_sim.py", "file_ext": "py", "file_size_in_byte": 7873, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "monascaclient.client.Client", "line_number": 94, "usage_type": "call"}, {"api_name": "monascaclient.client", "line_number": 94, "usage_type": "name"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "time.time", "line_number": 118, "usage_type": "call"}, {"api_name": "multiprocessing.active_children", "line_number": 166, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 169, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 169, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 173, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 197, "usage_type": "name"}, {"api_name": "time.time", "line_number": 198, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 203, "usage_type": "call"}, {"api_name": "signal.SIGCHLD", "line_number": 203, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 204, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 204, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 205, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 205, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 210, "usage_type": "call"}, {"api_name": "time.time", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "5855025940", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport warnings\nimport tempfile\nimport shutil\nfrom subprocess import check_call\nfrom tarfile import TarFile\nfrom pkgutil import get_data\nfrom io import BytesIO\nfrom contextlib import closing\n\nfrom dateutil.tz import tzfile\n\n__all__ = [\"gettz\", \"rebuild\"]\n\n_ZONEFILENAME = \"dateutil-zoneinfo.tar.gz\"\n\n# python2.6 compatability. 
Note that TarFile.__exit__ != TarFile.close, but\n# it's close enough for python2.6\n_tar_open = TarFile.open\nif not hasattr(TarFile, '__exit__'):\n def _tar_open(*args, **kwargs):\n return closing(TarFile.open(*args, **kwargs))\n\n\nclass tzfile(tzfile):\n def __reduce__(self):\n return (gettz, (self._filename,))\n\n\ndef getzoneinfofile_stream():\n try:\n return BytesIO(get_data(__name__, _ZONEFILENAME))\n except IOError as e: # TODO switch to FileNotFoundError?\n warnings.warn(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n return None\n\n\nclass ZoneInfoFile(object):\n def __init__(self, zonefile_stream=None):\n if zonefile_stream is not None:\n with _tar_open(fileobj=zonefile_stream, mode='r') as tf:\n # dict comprehension does not work on python2.6\n # TODO: get back to the nicer syntax when we ditch python2.6\n # self.zones = {zf.name: tzfile(tf.extractfile(zf),\n # filename = zf.name)\n # for zf in tf.getmembers() if zf.isfile()}\n self.zones = dict((zf.name, tzfile(tf.extractfile(zf),\n filename=zf.name))\n for zf in tf.getmembers() if zf.isfile())\n # deal with links: They'll point to their parent object. Less\n # waste of memory\n # links = {zl.name: self.zones[zl.linkname]\n # for zl in tf.getmembers() if zl.islnk() or zl.issym()}\n links = dict((zl.name, self.zones[zl.linkname])\n for zl in tf.getmembers() if\n zl.islnk() or zl.issym())\n self.zones.update(links)\n else:\n self.zones = dict()\n\n\n# The current API has gettz as a module function, although in fact it taps into\n# a stateful class. So as a workaround for now, without changing the API, we\n# will create a new \"global\" class instance the first time a user requests a\n# timezone. Ugly, but adheres to the api.\n#\n# TODO: deprecate this.\n_CLASS_ZONE_INSTANCE = list()\n\n\ndef gettz(name):\n if len(_CLASS_ZONE_INSTANCE) == 0:\n _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))\n return _CLASS_ZONE_INSTANCE[0].zones.get(name)\n\n\ndef rebuild(filename, tag=None, format=\"gz\", zonegroups=[]):\n \"\"\"Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*\n\n filename is the timezone tarball from ftp.iana.org/tz.\n\n \"\"\"\n tmpdir = tempfile.mkdtemp()\n zonedir = os.path.join(tmpdir, \"zoneinfo\")\n moduledir = os.path.dirname(__file__)\n try:\n with _tar_open(filename) as tf:\n for name in zonegroups:\n tf.extract(name, tmpdir)\n filepaths = [os.path.join(tmpdir, n) for n in zonegroups]\n try:\n check_call([\"zic\", \"-d\", zonedir] + filepaths)\n except OSError as e:\n if e.errno == 2:\n logging.error(\n \"Could not find zic. 
Perhaps you need to install \"\n \"libc-bin or some other package that provides it, \"\n \"or it's not in your PATH?\")\n raise\n target = os.path.join(moduledir, _ZONEFILENAME)\n with _tar_open(target, \"w:%s\" % format) as tf:\n for entry in os.listdir(zonedir):\n entrypath = os.path.join(zonedir, entry)\n tf.add(entrypath, entry)\n finally:\n shutil.rmtree(tmpdir)\n", "repo_name": "googlearchive/big-rig", "sub_path": "app/src/thirdparty/dateutil/zoneinfo/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 857, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tarfile.TarFile.open", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tarfile.TarFile", "line_number": 21, "usage_type": "name"}, {"api_name": "tarfile.TarFile", "line_number": 22, "usage_type": "argument"}, {"api_name": "contextlib.closing", "line_number": 24, "usage_type": "call"}, {"api_name": "tarfile.TarFile.open", "line_number": 24, "usage_type": "call"}, {"api_name": "tarfile.TarFile", "line_number": 24, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 34, "usage_type": "call"}, {"api_name": "pkgutil.get_data", "line_number": 34, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 36, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "74875765315", "text": "from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom jobs.models import Job\nimport json\nfrom datetime import datetime, date\nimport dateparser\n\n\nclass Command(BaseCommand):\n help = 'Set up the database'\n\n def handle(self, *args: str, **options: str):\n with open('static/210521chambajobs.json', 'r') as handle:\n big_json = json.loads(handle.read())\n for item in big_json:\n if len(item['description']) == 0:\n print('Not created. 
Description empty')\n                    continue\n\n                if item['publication_date'] is not None:\n                    dt = dateparser.parse(item['publication_date'])\n                else:\n                    dt = datetime.now()\n\n                new_date = date(dt.year, dt.month, dt.day)\n\n                existing_job = Job.objects.filter(\n\n                    job_title = item['job_title'],\n                    company = item['company'],\n                    company_url = item['company_url'],\n                    description = item['description'],\n                    publication_date = new_date,\n                    salary = item['salary'],\n                    city = item['city'],\n                    district = item['district'],\n                    job_url = item['job_url'],\n                    job_type = item['job_type'],\n\n                )\n                if existing_job.exists():\n                    print('This Job already exists')\n                else:\n                    Job.objects.create(\n\n                        job_title = item['job_title'],\n                        company = item['company'],\n                        company_url = item['company_url'],\n                        description = item['description'],\n                        publication_date = new_date,\n                        salary = item['salary'],\n                        city = item['city'],\n                        district = item['district'],\n                        job_url = item['job_url'],\n                        job_type = item['job_type'],\n\n                    )\n\n        self.stdout.write(self.style.SUCCESS('added jobs!'))\n", "repo_name": "dgpb/jobarts", "sub_path": "jobs/management/commands/addjobs.py", "file_name": "addjobs.py", "file_ext": "py", "file_size_in_byte": 2160, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}, {"api_name": "dateparser.parse", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 25, "usage_type": "call"}, {"api_name": "jobs.models.Job.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "jobs.models.Job.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "jobs.models.Job", "line_number": 27, "usage_type": "name"}, {"api_name": "jobs.models.Job.objects.create", "line_number": 44, "usage_type": "call"}, {"api_name": "jobs.models.Job.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "jobs.models.Job", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "13372900811", "text": "import json\nimport glob\nfrom shutil import move, copy\nimport pickle\nimport random\n\n# get the right files\n# parse them into json with same filenames\nimport os\n\n\ndef parse_to_json(txt_path, out_json):\n    with open(txt_path, encoding=\"utf-8\", errors='ignore') as f:\n        lines = f.readlines()\n\n    # get transcriptions from 2007 06 11 onwards\n    lines = lines[6455:]\n\n    result = {}\n\n    l = 0\n\n    while (l < len(lines) -2):\n        print(l)\n\n        line = lines[l]\n\n        date = line[0:6]\n\n        i = 1\n\n        next_lines = []\n        print(date)\n\n        while lines[l + i][0:6] == date:\n            next_lines.append(lines[l + i])\n            i = i + 1\n\n        year = line[0:2]\n        month = line[2:4]\n        day = line[4:6]\n\n        next_lines.insert(0, line)\n\n        full_t = \"\"\n        for n_line in next_lines:\n            p_l = n_line[10:]\n            p_l = p_l.strip('\\n')\n\n            full_t = full_t + p_l + \" \"\n\n        # remove last space\n        full_t = full_t[:-1]\n\n        split_t = full_t.split(\" - \")\n\n        for j, t in enumerate(split_t):\n            k = \"20\" + year + \"-\" + month + \"-\" + day + \"_\" + str(j)\n\n            result[k] = t\n\n        l = l + i\n\n    with open(out_json, \"w+\") as out_file:\n        json.dump(result, out_file)\n\ndef json_to_txt_from_images(transcriptions_json, image_paths, out_folder, images_folder):\n\n    if not os.path.exists(out_folder):\n        
os.makedirs(out_folder)\n\n with open(transcriptions_json) as transcriptions_file:\n t_dict = json.load(transcriptions_file)\n\n missing = []\n\n for impath in image_paths:\n\n key = impath.replace(\".png\", \"\")\n key = key.replace(images_folder, \"\")\n\n # print(key)\n\n txt_path = impath.replace(\".png\", \".txt\")\n txt_path = txt_path.replace(images_folder, out_folder)\n\n if key in t_dict.keys():\n annotation = t_dict[key]\n\n file_object = open(txt_path, \"w+\")\n file_object.write(annotation)\n file_object.close()\n\n else:\n print(key)\n missing.append(key)\n\n res = {\"missing\": missing}\n print(res)\n\n with open(\"missing.json\", \"w+\") as missing_file:\n json.dump(res, missing_file)\n\ndef fix_miss_annotated_images(missing_json, images_folder):\n\n with open(missing_json) as missing_file:\n missing_dict = json.load(missing_file)\n\n missing_list = missing_dict[\"missing\"]\n\n for missing in missing_list:\n\n base_path = images_folder + missing[0:-1] + \"*\"\n\n images_list = sorted(glob.glob(base_path))\n\n for i, image_path in enumerate(images_list):\n # remove last digit\n\n # x.png\n new_image_path = image_path[0:-5] + str(i) + \".png\"\n\n move(image_path, new_image_path)\n\ndef remove_missing(missing_json, images_folder):\n\n if not os.path.exists(\"data/dilbert/dilbert_transcribed/missing/\"):\n os.makedirs(\"data/dilbert/dilbert_transcribed/missing/\")\n\n with open(missing_json) as missing_file:\n missing_dict = json.load(missing_file)\n\n missing_list = missing_dict[\"missing\"]\n\n for missing in missing_list:\n\n images_list = sorted(glob.glob(images_folder + missing + \".png\"))\n\n for i, image_path in enumerate(images_list):\n # remove last digit\n new_image_path = image_path.replace(images_folder, \"data/dilbert/dilbert_transcribed/missing/\")\n # x.png\n move(image_path, new_image_path)\n\n\ndef make_train_test_pickles(images_list, image_folder, out_folder):\n\n length = len(images_list)\n tr_length = round(length * .75)\n\n file_paths = [fn.replace(image_folder, \"001.dilbert_transcribed/\").replace(\".png\", \"\") for fn in images_list]\n\n # ['001.Black_footed_Albatross/Black_Footed_Albatross_0046_18',\n print(file_paths[0])\n\n class_i = [1]*length\n\n file_paths_train = file_paths[0:tr_length]\n file_paths_test = file_paths[tr_length:]\n\n classi = [1] * length\n classi_train = classi[0:tr_length]\n classi_test = classi[tr_length:]\n\n print(length)\n\n if not os.path.exists(out_folder + \"train/\"):\n os.makedirs(out_folder + \"train/\")\n\n if not os.path.exists(out_folder + \"test/\"):\n os.makedirs(out_folder + \"test/\")\n\n pickle.dump(file_paths_train, open(out_folder + \"train/filenames.pickle\", \"wb\"))\n pickle.dump(file_paths_test, open(out_folder + \"test/filenames.pickle\", \"wb\"))\n\n pickle.dump(classi_train, open(out_folder + \"train/class_info.pickle\", \"wb\"))\n pickle.dump(classi_test, open(out_folder + \"test/class_info.pickle\", \"wb\"))\n\n\ndef make_examples(transcriptions_list, transcriptions_folder, out_folder, out_folder_old):\n\n # gen 2048\n fn_object = open(out_folder.replace(\"gen_captions/\", \"\") + \"example_filenames\", \"w+\")\n file_names = transcriptions_list[0:2050]\n\n for fn in file_names:\n copy_to = fn.replace(transcriptions_folder, out_folder)\n copy(fn, copy_to)\n\n fn = fn.replace(out_folder_old, \"gen_captions/\").replace(\".txt\", \"\")\n\n fn_object.write(fn)\n fn_object.write(\"\\n\")\n fn_object.close()\n\n\ndef save_losses(g_losses, d_losses, epoch):\n import json\n\n with 
open(\"g_d_losses_{}.json\".format(epoch), \"w+\") as js_file:\n res = {\"g_losses\": g_losses, \"d_losses\": d_losses}\n json.dump(res, js_file)\n\n\ndef copy_images_transcriptions(image_folder, image_paths, transcriptions_folder, transcriptions_paths, copy_to_im, copy_to_txt):\n\n new_im_path = image_paths[0].replace(image_folder, copy_to_im)\n new_tr_path = transcriptions_paths[0].replace(transcriptions_folder, copy_to_txt)\n\n if not os.path.exists(new_im_path):\n os.makedirs(new_im_path)\n os.makedirs(new_tr_path)\n\n for im_path in image_paths:\n new_im_path = im_path.replace(image_folder, copy_to_im)\n move(im_path, new_im_path)\n\n for tr_path in transcriptions_paths:\n new_tr_path = tr_path.replace(transcriptions_folder, copy_to_txt)\n move(tr_path, new_tr_path)\n\n\nif __name__ == \"__main__\":\n transcriptions_path = \"../data/dilbert/transcriptions.txt\"\n out_json = \"data/dilbert/transcriptions.json\"\n\n # parse_to_json(transcriptions_path, out_json)\n image_folder = \"data/dilbert/dilbert_transcribed_10k/images/001.dilbert_transcribed/\"\n image_paths = sorted(glob.glob(\"data/dilbert/dilbert_transcribed_10k/images/001.dilbert_transcribed/*\"))\n out_folder = \"data/dilbert/dilbert_transcribed/\"\n\n train_paths_im = image_paths[0:3000]\n\n test_paths = image_paths[3000:5200]\n\n # json_to_txt_from_images(out_json, image_paths, out_folder, image_folder)\n\n # fix_miss_annotated_images(\"missing.json\", image_folder)\n\n # remove_missing(\"missing.json\", image_folder)\n\n\n transcriptions_folder = \"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/\"\n transcriptions_list = sorted(glob.glob(\"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/*\"))\n\n train_paths_txt = transcriptions_list[0:3000]\n\n gen_trans = transcriptions_list[-2100:]\n\n print(len(image_paths))\n\n out_folder_txt = out_folder + \"text/001.dilbert_transcribed/\"\n out_folder_im = out_folder + \"images/001.dilbert_transcribed/\"\n\n image_folder_3k = \"data/dilbert/dilbert_transcribed/images/001.dilbert_transcribed/\"\n image_paths_3k = sorted(glob.glob(\"data/dilbert/dilbert_transcribed/images/001.dilbert_transcribed/*\"))\n\n # make_train_test_pickles(image_paths_3k, image_folder_3k, out_folder)\n\n out_folder_gen_captions = out_folder + \"gen_captions/\"\n out_folder_old = \"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/\"\n\n print(len(gen_trans))\n\n # copy_images_transcriptions(image_folder, train_paths_im, transcriptions_folder, train_paths_txt, out_folder_im, out_folder_txt)\n\n # transcriptions_folder = \"data/dilbert/dilbert_transcribed/text/001.dilbert_transcribed/\"\n # make_examples(gen_trans, transcriptions_folder, out_folder_gen_captions, out_folder_old)\n\n", "repo_name": "bprovanbessell/research-code", "sub_path": "data-prep-and-analysis/transcription_parser.py", "file_name": "transcription_parser.py", "file_ext": "py", "file_size_in_byte": 8151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.dump", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "json.load", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 102, "usage_type": "call"}, {"api_name": "json.load", "line_number": 107, "usage_type": 
"call"}, {"api_name": "glob.glob", "line_number": 115, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 128, "usage_type": "call"}, {"api_name": "json.load", "line_number": 131, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 137, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 171, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 173, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 174, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 176, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 177, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 188, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 211, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 212, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 216, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 220, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 229, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 244, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "12929825596", "text": "\"\"\" A simple decorator for computing time cost.\n\nThe results will be passed to the logger defined below.\n\"\"\"\nimport logging\nimport time\nfrom common import LOG_FILE\nlogger = logging.getLogger(__name__)\nfile_handler = logging.FileHandler(LOG_FILE, mode='a')\nlogger.addHandler(file_handler)\nlogger.setLevel(logging.INFO)\n\ndef time_cost(func):\n def wrapper(*args, **kwargs):\n start = time.time()\n ret = func(*args, **kwargs)\n time_cost = time.time() - start\n logger.info(\"The time cost of {} is {}.\".format(func.__name__, time_cost))\n return ret\n return wrapper\n", "repo_name": "Tou7and/audio-content-analysis", "sub_path": "src/time_cost.py", "file_name": "time_cost.py", "file_ext": "py", "file_size_in_byte": 599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "common.LOG_FILE", "line_number": 9, "usage_type": "argument"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "29463652353", "text": "import pandas as pd\nimport re\nfrom tqdm.notebook import tqdm\nimport snscrape.modules.twitter as sntwitter 
\nimport pymysql\nfrom datetime import datetime, timedelta\nimport schedule\nimport time\nimport query \nimport classify_with_model\nimport preprocessing\n\n# 단어 리스트\nsearch_keyword_list = {\n '기타': ['드라퍼', '브액', '아이스드랍', '주사기', '클럽약', '텔레', '파티약', '허브', '후리베이스'],\n '대마': ['대마초', '사티바', '인디카', '합성대마', '해시시'],\n '메스암페타민': ['사끼', '차가운술', '작대기', '떨액', '크리스탈', '삥두', '시원한술', '필로폰', '아이스술', '히로뽕', '액상떨', '아이스', '북한산아이스', '빙두', '찬술', '샤부'],\n '사일로시빈': ['마법의버섯', '환각버섯'],\n '아편': ['스틸녹스', '신의눈물', '에토미데이트', '옥시코돈', '졸피뎀', '트라마돌'],\n '알킬니트라이트': ['러쉬파퍼', '랏슈', '정글주스'],\n '케타민': ['캔디케이', '케타민'],\n '코카인': ['서울코카인', '충북코카인', '충남코카인', '강원코카인', '경기코카인', '전북코카인', '전남코카인', '경북코카인', '경남코카인', '제주코카인', '강남코카인', '부산코카인', '인천코카인'],\n 'GHB': ['물뽕', '발정제', '최음제'],\n 'LSD': ['엘에스디'],\n 'MDMA': ['엑스터시', '서울도리도리', '충북도리도리', '충남도리도리', '강원도리도리', '경기도리도리', '전북도리도리', '전남도리도리', '경북도리도리', '경남도리도리', '제주도리도리', '강남도리도리', '부산도리도리', '인천도리도리', '서울몰리', '충북몰리', '충남몰리', '강원몰리', '경기몰리', '전북몰리', '전남몰리', '경북몰리', '경남몰리', '제주몰리', '강남몰리', '부산몰리', '인천몰리']\n }\n \nexcept_words = ['허브맛', '허브맛쿠키', '허브솔트', '스파허브', '아이허브', '미국', '대회', 'F1', '유아인', '휘성', '검찰', '해시브라운', '시간', '웃어', '웃으', '시시해', \\\n '에프엑스', 'fx', '정수정', '크리스탈라이트', '제시카', \\\n '아이스베어', '아이스탕후루', '아이스만주', '아이스만쥬', '아메리카노', '얼죽아', '블랙아이스', '아이스크림', '초코', '커피', '카페', '아이스께끼', '찰떡', '아이스티', '겨울', '라떼', '에스프레소', '하키', '팝업', '주문', '당첨', '블렌드', '블렌디드', '바닐라', '헤이즐넛', '모찌', '케이크', '음료', '콜드브루', '프라푸치노', '엔시티', '스톰', '아이스맨', '매브', '매버릭', \\\n '남경필', '한서희', '브레이킹 배드', '돈스파이크', \\\n '브레이킹 배드', \\\n '샤브샤브', '샤브', \\\n '오마이걸 유빈', \\\n 'PD수첩', '히어로물뽕', '홍준표', '돼지', \\\n '몰리면', '홀리몰리', '홀리 몰리', '과카몰리', '몰리게', '내몰리', '몰리는', '미스몰리', \\\n '엑스토시움', \\\n '유아인', '허성태', '코카인댄스', \\\n '머쉬룸 스프', '머쉬룸스프', '수프', '버거', '파스타', '맛집', '표고버섯', '치즈', '피자', \\\n '양지원', \\\n '의사', '병원', '처방받', '처방 받', '졸피뎀과 나', '처벌', '구속', '불면', \\\n '정글쥬스', \\\n '전두환 손자', '전우원', '돈스파이크', '유아인', \\\n '병원','여드름','뾰루지','얼굴','흉터','흉','상처',\\\n '라이브','북클럽','콘서트','팬미팅','팬클럽','공연','대리', '음향', '춤', '50억', '비리', '수사', '대리티켓팅', '50억클럽', '멜론', '수작', '냄새', '웹툰', '게임'\n ]\n\n#####################################################################################\n\ndef crawl_for_period(\n type: str,\n search_query: str,\n start_date: str,\n end_date: str,\n except_words: list # 제외어 리스트 \n ):\n \n query = str(search_query) + \" since:\" + str(start_date) + \" until:\" + str(end_date)\n print(f\"검색 query: {query}\")\n\n # 트위터 데이터 저장할 리스트\n tweets_list = []\n \n for i, tweet in (enumerate(sntwitter.TwitterSearchScraper(query).get_items())): \n # 수집할 데이터 컬럼\n data = [\n type, \n search_query,\n tweet.date, \n tweet.id,\n tweet.user.username,\n tweet.user.displayname,\n tweet.place,\n tweet.user.location,\n tweet.content, \n tweet.likeCount,\n tweet.retweetCount, \n tweet.viewCount,\n tweet.hashtags,\n tweet.media, \n tweet.sourceLabel\n ]\n\n # 트윗 내용에 제외어 하나라도 포함시 제외하기\n if any(words in tweet.content for words in except_words):\n continue\n \n # 리트윗 데이터는 제외하기 (ex. 
@닉네임)\n regex = re.compile(\"@[\\w_]+\")\n if regex.search(tweet.content):\n continue \n \n # tweet.content 전처리\n data[8] = preprocessing.preprocessing_data(tweet.content)\n \n # media, hashtag 리스트를 string으로 변환해서 저장하기\n if isinstance(data[-2], list): \n data[-2] = str(data[-2])\n if isinstance(data[-3], list): \n data[-3] = str(data[-3])\n \n # 모델이 content에 대해서 classification 수행\n pred_label, prediction, _ = classify_with_model.test_sentences(data[8])\n \n # 마약 거래 게시글이 맞으면 tweets_list에 넣기\n if pred_label == 1: \n data.append(pred_label)\n data.append(prediction)\n tweets_list.append(data) \n print(f'[{search_query}] 키워드에 대해서 분류된 트윗 개수: {len(tweets_list)}')\n return tweets_list\n\ndef search_twitter():\n print(f'{datetime.today()} 작업 실행')\n \n # DB에 3일간의 데이터만 저장하기 위해 기존 데이터 지우기\n query.delete_from_db('DELETE FROM classified')\n search_query = ''\n \n # 3일간의 데이터 추적\n start_date= (datetime.today() -timedelta(3)).strftime(\"%Y-%m-%d\")\n end_date = datetime.today().strftime(\"%Y-%m-%d\")\n \n # 키워드 하나씩 db에 저장\n for type1, type2 in search_keyword_list.items():\n for t in type2:\n print(f'type1: {type1}, type2: {t}')\n search_query = t\n tweets_list = crawl_for_period(type1, search_query, start_date, end_date, except_words) \n # mysql DB에 저장\n if len(tweets_list) > 0:\n query.save_to_db(tweets_list)\n print(f\"\\n ------------------- 총 {len(tweets_list)}개 게시글 저장 완료 -------------------\\n\\n\")\n# main 실행\nsearch_twitter()\n\n", "repo_name": "pastel-blue/2023_dscap", "sub_path": "Prototype/crawler/main_crawler.py", "file_name": "main_crawler.py", "file_ext": "py", "file_size_in_byte": 7249, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "snscrape.modules.twitter.TwitterSearchScraper", "line_number": 64, "usage_type": "call"}, {"api_name": "snscrape.modules.twitter", "line_number": 64, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 89, "usage_type": "call"}, {"api_name": "preprocessing.preprocessing_data", "line_number": 94, "usage_type": "call"}, {"api_name": "classify_with_model.test_sentences", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "name"}, {"api_name": "query.delete_from_db", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "name"}, {"api_name": "query.save_to_db", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "37210701717", "text": "import math\nimport pandas as pd\nimport pyro\nimport pyro.distributions as dist\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pyro.infer import SVI, Trace_ELBO\nfrom tqdm import trange\n\n\"\"\"\nThis port to Pyro doesn't work, the topics are not coherent...\n\n\"\"\"\n\n\nclass Encoder(nn.Module):\n def __init__(self, vocab_size, num_topics, hidden, dropout):\n super().__init__()\n self.drop = nn.Dropout(dropout)\n self.fc1 = nn.Linear(vocab_size, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fcmu = nn.Linear(hidden, num_topics)\n self.fclv = nn.Linear(hidden, num_topics)\n self.bnmu = 
nn.BatchNorm1d(num_topics)\n self.bnlv = nn.BatchNorm1d(num_topics)\n\n def forward(self, inputs):\n h = F.softplus(self.fc1(inputs))\n h = F.softplus(self.fc2(h))\n h = self.drop(h)\n theta_loc = self.bnmu(self.fcmu(h))\n theta_scale = self.bnlv(self.fclv(h))\n return theta_loc, theta_scale\n\n\nclass Decoder(nn.Module):\n def __init__(self, vocab_size, num_topics, dropout):\n super().__init__()\n self.beta = nn.Linear(num_topics, vocab_size)\n self.bn = nn.BatchNorm1d(vocab_size)\n self.drop = nn.Dropout(dropout)\n\n def forward(self, inputs):\n inputs = self.drop(inputs)\n return F.log_softmax(self.bn(self.beta(inputs)), dim=1)\n\n\nclass ProdLDA(nn.Module):\n def __init__(self, vocab_size, num_topics, hidden, dropout, device):\n super().__init__()\n self.vocab_size = vocab_size\n self.num_topics = num_topics\n self.inference_net = Encoder(vocab_size, num_topics, hidden, dropout)\n self.recognition_net = Decoder(vocab_size, num_topics, dropout)\n self.device = device\n\n def model(self, doc_sum=None):\n # register PyTorch module `decoder` with Pyro\n pyro.module(\"recognition_net\", self.recognition_net)\n with pyro.plate(\"documents\", doc_sum.shape[0]):\n # setup hyperparameters\n theta_loc = doc_sum.new_zeros((doc_sum.shape[0], self.num_topics))\n theta_scale = doc_sum.new_ones((doc_sum.shape[0], self.num_topics))\n # sample from prior (value will be sampled by guide\n # when computing the ELBO)\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, (0.5 * theta_scale).exp()).to_event(1))\n theta = theta / theta.sum(1, keepdim=True)\n\n count_param = self.recognition_net(theta)\n pyro.sample(\n 'obs',\n dist.Multinomial(doc_sum.shape[1], count_param).to_event(1),\n obs=doc_sum\n )\n\n def guide(self, doc_sum=None):\n # Use an amortized guide for local variables.\n pyro.module(\"inference_net\", self.inference_net)\n with pyro.plate(\"documents\", doc_sum.shape[0]):\n theta_loc, theta_scale = self.inference_net(doc_sum)\n pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, (0.5 * theta_scale).exp()).to_event(1))\n\n def beta(self):\n return self.recognition_net.beta.weight.cpu().detach().T\n\n\ndef train(device, doc_sum, batch_size, learning_rate, num_epochs):\n # clear param store\n pyro.clear_param_store()\n\n prodLDA = ProdLDA(\n vocab_size=doc_sum.shape[1],\n num_topics=100,\n hidden=100,\n dropout=0.2,\n device=device\n )\n prodLDA.to(device)\n\n optimizer = pyro.optim.Adam({\"lr\": learning_rate})\n svi = SVI(prodLDA.model, prodLDA.guide, optimizer, loss=Trace_ELBO())\n num_batches = int(math.ceil(doc_sum.shape[0] / batch_size))\n\n bar = trange(num_epochs)\n for epoch in bar:\n running_loss = 0.0\n\n # Iterate over data.\n for i in range(num_batches):\n batch_doc_sum = doc_sum[i * batch_size:(i + 1) * batch_size, :]\n loss = svi.step(batch_doc_sum)\n running_loss += loss / batch_doc_sum.size(0)\n\n epoch_loss = running_loss / doc_sum.shape[0]\n bar.set_postfix(epoch_loss='{:.2f}'.format(epoch_loss))\n\n return prodLDA\n\n\nif __name__ == '__main__':\n # The data used is the pre-processed AP corpus from David Blei's website:\n # http://www.cs.columbia.edu/~blei/lda-c/\n # (the pre-processing code is not included for simplification)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n doc_sum = torch.load('doc_sum_ap.pt').float().to(device)\n trained_model = train(device, doc_sum, 32, 1e-3, 80)\n\n beta = trained_model.beta()\n torch.save(beta, 'betas.pt')\n\n # Print topics' top words\n vocab = pd.read_csv('../input/prodlda/vocab.csv')\n for 
i in range(beta.shape[0]):\n sorted_, indices = torch.sort(beta[i], descending=True)\n df = pd.DataFrame(indices[:20].numpy(), columns=['index'])\n print(pd.merge(df, vocab[['index', 'word']], how='left', on='index')['word'].values)\n print()\n", "repo_name": "ucals/prodlda_stuck", "sub_path": "prodlda_port_to_pyro.py", "file_name": "prodlda_port_to_pyro.py", "file_ext": "py", "file_size_in_byte": 4928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.functional.softplus", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.functional.softplus", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "pyro.module", "line_number": 60, "usage_type": "call"}, {"api_name": "pyro.plate", "line_number": 61, "usage_type": "call"}, {"api_name": "pyro.sample", "line_number": 67, "usage_type": "call"}, {"api_name": "pyro.distributions.LogNormal", "line_number": 68, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 68, "usage_type": "name"}, {"api_name": "pyro.sample", "line_number": 72, "usage_type": "call"}, {"api_name": "pyro.distributions.Multinomial", "line_number": 74, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 74, "usage_type": "name"}, {"api_name": "pyro.module", "line_number": 80, 
"usage_type": "call"}, {"api_name": "pyro.plate", "line_number": 81, "usage_type": "call"}, {"api_name": "pyro.sample", "line_number": 83, "usage_type": "call"}, {"api_name": "pyro.distributions.LogNormal", "line_number": 84, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 84, "usage_type": "name"}, {"api_name": "pyro.clear_param_store", "line_number": 92, "usage_type": "call"}, {"api_name": "pyro.optim.Adam", "line_number": 103, "usage_type": "call"}, {"api_name": "pyro.optim", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pyro.infer.SVI", "line_number": 104, "usage_type": "call"}, {"api_name": "pyro.infer.Trace_ELBO", "line_number": 104, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 105, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 139, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "13553211571", "text": "#! /usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport time\nimport csv\n\nprev_frame_time = 0\n\ndef callback(msg):\n\n roll = pitch = yaw = 0.0\n orientation_q = msg.pose.pose.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (roll, pitch, yaw) = euler_from_quaternion (orientation_list)\n \n position_q = msg.pose.pose.position\n \n pos = [position_q.x, position_q.y, yaw]\n \n print(pos)\n \n global prev_frame_time\n new_frame_time = rospy.get_time()\n if (new_frame_time-prev_frame_time > 0):\n fps = str(round( 1/(new_frame_time-prev_frame_time), 2 ))\n else:\n fps = 'inf'\n prev_frame_time = new_frame_time\n logfps = 'odom rate: '+fps+' Hz'\n rospy.loginfo(logfps)\n \nrospy.init_node('odom')\nsub = rospy.Subscriber('/odom', Odometry, callback)\nrospy.spin()\n", "repo_name": "jakabfarkas/ros_cone_detection", "sub_path": "scripts/odom_subscriber.py", "file_name": "odom_subscriber.py", "file_ext": "py", "file_size_in_byte": 964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tf.transformations.euler_from_quaternion", "line_number": 16, "usage_type": "call"}, {"api_name": "rospy.get_time", "line_number": 25, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 32, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 34, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 35, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 35, "usage_type": "argument"}, {"api_name": "rospy.spin", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "41638126988", "text": "import pytest\nimport numpy as np\nimport openpnm as op\nimport numpy.testing as nt\n\n\nclass TransientImplicitReactiveTransportTest:\n\n def setup_class(self):\n np.random.seed(0)\n self.net = 
op.network.Cubic(shape=[3, 3, 1], spacing=1e-6)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.geo['pore.volume'] = 1e-12\n self.phase = op.phases.GenericPhase(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n self.phys['pore.A'] = -1e-13\n self.phys['pore.k'] = 2\n self.phys['throat.diffusive_conductance'] = 1e-12\n mod = op.models.physics.generic_source_term.standard_kinetics\n self.phys.add_model(propname='pore.reaction',\n model=mod,\n prefactor='pore.A',\n exponent='pore.k',\n X='pore.concentration',\n regen_mode='deferred')\n self.settings = {'conductance': 'throat.diffusive_conductance',\n 'quantity': 'pore.concentration'}\n self.alg = op.algorithms.TransientReactiveTransport(network=self.net,\n phase=self.phase,\n settings=self.settings)\n self.alg.setup(quantity='pore.concentration',\n conductance='throat.diffusive_conductance',\n t_initial=0, t_final=1, t_step=0.1, t_tolerance=1e-7,\n t_precision=10, rxn_tolerance=1e-6)\n self.alg.set_value_BC(pores=self.net.pores('front'), values=2)\n self.alg.set_source(propname='pore.reaction',\n pores=self.net.pores('back'))\n self.alg.set_IC(0)\n\n def test_transient_implicit_reactive_transport(self):\n self.alg.setup(t_scheme='implicit')\n self.alg.run()\n x = [2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_transient_cranknicolson_reactive_transport(self):\n self.alg.setup(t_scheme='cranknicolson')\n self.alg.run()\n x = [2., 0.97167537, 0.4209642,\n 2., 0.97167537, 0.4209642,\n 2., 0.97167537, 0.4209642]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_transient_reactive_transport_output_times(self):\n self.alg.setup(t_output=[0, 0.5, 0.7, 1])\n self.alg.run()\n times = [\"pore.concentration@0\",\n \"pore.concentration@5e-1\",\n \"pore.concentration@7e-1\",\n \"pore.concentration@1\"]\n assert set(times).issubset(self.alg.keys())\n\n def test_transient_reactive_transport_results(self):\n times_total = [\"pore.concentration@0\",\n \"pore.concentration@5e-1\",\n \"pore.concentration@7e-1\",\n \"pore.concentration@1\"]\n results_total = set(self.alg.results(steps=None).keys())\n results_total.discard(\"pore.concentration\")\n assert set(times_total) == results_total\n times_partial = [\"pore.concentration@5e-1\",\n \"pore.concentration@1\"]\n results_partial = set(self.alg.results(times=[0.5, 1]).keys())\n results_partial.discard(\"pore.concentration\")\n assert set(times_partial) == results_partial\n\n def test_transient_steady_mode_reactive_transport(self):\n self.alg.setup(t_scheme=\"steady\")\n self.alg.run()\n x = [2, 1.76556357, 1.53112766,\n 2, 1.76556357, 1.53112766,\n 2, 1.76556357, 1.53112766]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n self.alg.run()\n\n def test_consecutive_runs_preserves_solution(self):\n self.alg.setup(t_scheme='implicit')\n self.alg.run()\n x = [2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n self.alg.run()\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_adding_bc_over_sources(self):\n with pytest.raises(Exception):\n self.alg.set_value_BC(pores=self.net.pores(\"right\"), values=0.3)\n\n def 
test_adding_sources_over_bc(self):\n with pytest.raises(Exception):\n self.alg.set_source(propname='pore.reaction',\n pores=self.net.pores('left'))\n\n def test_ensure_settings_are_valid(self):\n alg = op.algorithms.TransientReactiveTransport(network=self.net,\n phase=self.phase)\n with pytest.raises(Exception, match=r\".*quantity.*\"):\n alg.run()\n alg.settings['quantity'] = 'pore.concentration'\n with pytest.raises(Exception, match=r\".*conductance.*\"):\n alg.run()\n alg.settings['conductance'] = 'throat.conductance'\n with pytest.raises(Exception):\n alg.run()\n\n def teardown_class(self):\n ws = op.Workspace()\n ws.clear()\n\n\nif __name__ == '__main__':\n\n t = TransientImplicitReactiveTransportTest()\n t.setup_class()\n self = t\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n", "repo_name": "halotudio/openPNM-copy2", "sub_path": "tests/unit/algorithms/TransientReactiveTransportTest.py", "file_name": "TransientReactiveTransportTest.py", "file_ext": "py", "file_size_in_byte": 5837, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "openpnm.network.Cubic", "line_number": 11, "usage_type": "call"}, {"api_name": "openpnm.network", "line_number": 11, "usage_type": "attribute"}, {"api_name": "openpnm.geometry.GenericGeometry", "line_number": 12, "usage_type": "call"}, {"api_name": "openpnm.geometry", "line_number": 12, "usage_type": "attribute"}, {"api_name": "openpnm.phases.GenericPhase", "line_number": 16, "usage_type": "call"}, {"api_name": "openpnm.phases", "line_number": 16, "usage_type": "attribute"}, {"api_name": "openpnm.physics.GenericPhysics", "line_number": 17, "usage_type": "call"}, {"api_name": "openpnm.physics", "line_number": 17, "usage_type": "attribute"}, {"api_name": "openpnm.models", "line_number": 23, "usage_type": "attribute"}, {"api_name": "openpnm.algorithms.TransientReactiveTransport", "line_number": 32, "usage_type": "call"}, {"api_name": "openpnm.algorithms", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 105, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 112, "usage_type": "call"}, {"api_name": "openpnm.algorithms.TransientReactiveTransport", "line_number": 117, "usage_type": "call"}, {"api_name": "openpnm.algorithms", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 119, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 122, "usage_type": "call"}, 
{"api_name": "pytest.raises", "line_number": 125, "usage_type": "call"}, {"api_name": "openpnm.Workspace", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "27706353368", "text": "from django.urls import path\nfrom .views import AdvertisementView, AdvertisementManyView\n\n\napp_name = \"advertisements\"\n\n# app_name will help us do a reverse look-up latter.\nurlpatterns = [\n path('advertisements/', AdvertisementManyView.as_view()),\n path('advertisement/', AdvertisementView.as_view()),\n path('advertisement', AdvertisementView.as_view()),\n]", "repo_name": "andru196/miniAd", "sub_path": "siteApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.AdvertisementManyView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.AdvertisementManyView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.AdvertisementView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.AdvertisementView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.AdvertisementView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.AdvertisementView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "74482868993", "text": "import re\nimport datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport logging\nfrom typing import Tuple, List\nimport os\nfrom Producto import Producto\n\nHEADERS = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/107.0.0.0 Safari/537.36 '\n }\n\nexecution_datetime = datetime.datetime.now()\n\n\ndef get_xml_page(url: str) -> BeautifulSoup:\n \"\"\"\n :param url:\n :return: Devuelve un objeto BeautifulSoup para operar con la pagina cargada\n \"\"\"\n\n session = requests.Session()\n page = session.get(url, headers=HEADERS)\n soup = BeautifulSoup(page.content, features='xml')\n\n return soup\n\n\ndef get_html_page(url: str) -> BeautifulSoup:\n \"\"\"\n :param url:\n :return: Devuelve un objeto BeautifulSoup para operar con la pagina cargada\n \"\"\"\n\n session = requests.Session()\n # Se simula navegacion humana, con retraso de 10x el tiempo del request.\n t0 = time.time()\n page = session.get(url, headers=HEADERS)\n delay = time.time() - t0\n time.sleep(0.2 * delay)\n soup = BeautifulSoup(page.content, features='html.parser')\n\n return soup\n\n\ndef get_info_from_url(url: str) -> Producto:\n \"\"\"\n param url: url address to scrap\n return: dic with scrapped information.\n raise ProductoIncorrectoException: when coudn't fetch any product information\n \"\"\"\n\n page = get_html_page(url)\n\n producto = Producto()\n\n producto.product_id = str(url.split('/')[-1])\n producto.price = __obtain_price(page)\n producto.product, producto.brand = __obtain_name(page)\n producto.unit_price, producto.units = __obtain_price_per_unit(page)\n producto.categories = __obtain_categories(page)\n producto.discount = __obtain_discount(page)\n producto.date = execution_datetime.date()\n\n # comprobamos si hay informacion missing.\n if any([producto.price is None, producto.product is None, producto.brand is None,\n producto.unit_price is None, producto.units is 
None]):\n logging.warning(f\"{url} failed. Missing information.\")\n raise ProductoIncorrectoException(f\"Producto_id: {producto.product_id}\")\n\n return producto\n\n\ndef __obtain_name(page: BeautifulSoup) -> Tuple[str, str]:\n fetched_product = page.find_all(\"h1\", class_=\"product-title\")\n try:\n product_name = [process_name(product.text) for product in fetched_product][0]\n brand = [__process_brand(product.text) for product in fetched_product][0]\n except (IndexError, AttributeError):\n logging.warning('Product name not found')\n product_name = None\n brand = None\n return product_name, brand\n\n\ndef __obtain_price(page: BeautifulSoup) -> float:\n try:\n fetched_price = page.find_all(\"p\", class_=\"buy-box__active-price\")\n price = float([__process_price(price.text) for price in fetched_price][0])\n except (IndexError, AttributeError):\n logging.warning('Product price not found')\n price = None\n return price\n\n\ndef __obtain_categories(page: BeautifulSoup) -> List[str]:\n fetched_categories = page.find_all(\"span\", class_=\"breadcrumb-item__link\")\n try:\n categories = [__preprocess_str(category.text) for category in fetched_categories]\n except AttributeError:\n categories = None\n return categories\n\n\ndef __obtain_price_per_unit(page: BeautifulSoup) -> Tuple[float, str]:\n fetched_unit_prices = page.find_all(\"p\", \"buy-box__price-per-unit\")\n try:\n price = float([__process_price(unit_price.text) for unit_price in fetched_unit_prices][0])\n units = [__process_unit_price(unit_price.text) for unit_price in fetched_unit_prices][0]\n except (IndexError, AttributeError):\n logging.warning('Unit price not found')\n price = None\n units = None\n return price, units\n\n\ndef __obtain_discount(page: BeautifulSoup) -> str:\n try:\n fetched_discount = page.find_all(\"span\", \"product_details_promotion_description\")\n discount_percentage = [__process_discount(discount.text) for discount in fetched_discount][0]\n except (IndexError, AttributeError):\n discount_percentage = None\n return discount_percentage\n\n\ndef create_data_folder():\n today = str(datetime.date.today()).replace('-', '')\n data_path = os.path.join(os.getcwd(), '../..', 'dataset', today)\n os.makedirs(os.path.join(data_path, 'tmp'), exist_ok=True)\n return data_path\n\n\ndef __preprocess_str(text: str) -> str:\n rm_chars = [\"\\r\", \"\\n\", \"\\t\"]\n for char in rm_chars:\n text = text.replace(char, \"\")\n return text.replace(\",\", \".\").strip()\n\n\ndef __process_unit_price(text: str) -> str:\n match = re.search('€.+$', text).group().strip()\n return match\n\n\ndef __process_price(text: str) -> str:\n match = re.search('\\\\d+,\\\\d+', text).group().strip()\n return match.replace(\",\", \".\")\n\n\ndef __process_discount(text: str) -> str:\n match = re.search('\\\\b\\\\d+%', text).group().strip()\n return match.replace(\",\", \".\")\n\n\ndef __process_brand(text: str) -> str:\n text = __preprocess_str(text)\n match = re.findall('[A-Z]\\\\w+', text)\n\n return match[1]\n\n\ndef process_name(text: str) -> str:\n text = __preprocess_str(text)\n # match = re.findall('[A-Z][a-z áéíóú]+', text)\n match = re.findall('.*', text)\n\n return match[0]\n\n\ndef __print_page(page: BeautifulSoup, ruta: str):\n \"\"\"\n imprime la pagina escrapeada en la ruta correspondiente.\n \"\"\"\n with open(ruta, \"w\", encoding=\"utf-8\") as f:\n f.write(page.prettify())\n\n\nclass ProductoIncorrectoException(Exception):\n ...\n", "repo_name": "cperezh/FoodECommerceScraper", "sub_path": "source/Producto_DIA_DP/utils.py", 
"file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 25, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 44, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "name"}, {"api_name": "Producto.Producto", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "Producto.Producto", "line_number": 49, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 83, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 77, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 89, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 94, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 99, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 108, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 114, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 108, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 131, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 144, "usage_type": "call"}, {"api_name": "re.search", "line_number": 149, "usage_type": "call"}, {"api_name": "re.search", "line_number": 154, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 160, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 168, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "19216358153", "text": "\"\"\"An bruteforce agent that repeatedly cycles through all available actions in\norder.\n\nTo run 'tiny' benchmark scenario with default settings, run the following from\nthe nasim/agents dir:\n\n$ python bruteforce_agent.py tiny\n\nThis will run the agent and display progress and final results to stdout.\n\nTo see available running arguments:\n\n$ python bruteforce_agent.py --help\n\"\"\"\n\nfrom itertools import product\n\nimport 
nasim\n\nLINE_BREAK = \"-\"*60\n\n\ndef run_bruteforce_agent(env, step_limit=1e6, verbose=True):\n \"\"\"Run bruteforce agent on nasim environment.\n\n Parameters\n ----------\n env : nasim.NASimEnv\n the nasim environment to run agent on\n step_limit : int, optional\n the maximum number of steps to run agent for (default=1e6)\n verbose : bool, optional\n whether to print out progress messages or not (default=True)\n\n Returns\n -------\n int\n timesteps agent ran for\n float\n the total reward recieved by agent\n bool\n whether the goal was reached or not\n \"\"\"\n if verbose:\n print(LINE_BREAK)\n print(\"STARTING EPISODE\")\n print(LINE_BREAK)\n print(\"t: Reward\")\n\n env.reset()\n total_reward = 0\n done = False\n env_step_limit_reached = False\n steps = 0\n cycle_complete = False\n\n if env.flat_actions:\n act = 0\n else:\n act_iter = product(*[range(n) for n in env.action_space.nvec])\n\n while not done and not env_step_limit_reached and steps < step_limit:\n if env.flat_actions:\n act = (act + 1) % env.action_space.n\n cycle_complete = (steps > 0 and act == 0)\n else:\n try:\n act = next(act_iter)\n cycle_complete = False\n except StopIteration:\n act_iter = product(*[range(n) for n in env.action_space.nvec])\n act = next(act_iter)\n cycle_complete = True\n\n _, rew, done, env_step_limit_reached, _ = env.step(act)\n total_reward += rew\n\n if cycle_complete and verbose:\n print(f\"{steps}: {total_reward}\")\n steps += 1\n\n if done and verbose:\n print(LINE_BREAK)\n print(\"EPISODE FINISHED\")\n print(LINE_BREAK)\n print(f\"Goal reached = {env.goal_reached()}\")\n print(f\"Total steps = {steps}\")\n print(f\"Total reward = {total_reward}\")\n elif verbose:\n print(LINE_BREAK)\n print(\"STEP LIMIT REACHED\")\n print(LINE_BREAK)\n\n if done:\n done = env.goal_reached()\n\n return steps, total_reward, done\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"env_name\", type=str, help=\"benchmark scenario name\")\n parser.add_argument(\"-s\", \"--seed\", type=int, default=0,\n help=\"random seed\")\n parser.add_argument(\"-o\", \"--partially_obs\", action=\"store_true\",\n help=\"Partially Observable Mode\")\n parser.add_argument(\"-p\", \"--param_actions\", action=\"store_true\",\n help=\"Use Parameterised action space\")\n parser.add_argument(\"-f\", \"--box_obs\", action=\"store_true\",\n help=\"Use 2D observation space\")\n args = parser.parse_args()\n\n nasimenv = nasim.make_benchmark(\n args.env_name,\n args.seed,\n not args.partially_obs,\n not args.param_actions,\n not args.box_obs\n )\n if not args.param_actions:\n print(nasimenv.action_space.n)\n else:\n print(nasimenv.action_space.nvec)\n run_bruteforce_agent(nasimenv)\n", "repo_name": "Jjschwartz/NetworkAttackSimulator", "sub_path": "nasim/agents/bruteforce_agent.py", "file_name": "bruteforce_agent.py", "file_ext": "py", "file_size_in_byte": 3577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 109, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.product", "line_number": 60, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 71, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 102, "usage_type": "call"}, {"api_name": "nasim.make_benchmark", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "14426073475", "text": "\nimport sys\nimport configparser\n\nCONFIG_FN = \"main.cfg\"\n\ndata = configparser.ConfigParser()\nif len( data.read(CONFIG_FN,encoding='utf8') 
)!=1:\n\tprint( f\"Failed to read config file '{CONFIG_FN}'\" )\n\tsys.exit(1)\n", "repo_name": "TasurtSPb/microelectronics", "sub_path": "modules/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 210, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "configparser.ConfigParser", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "7641568043", "text": "import os\nimport math\nimport PIL.Image\nimport PIL.ImageDraw\nimport pyttsx3\nimport time\nimport tkinter as tk\nfrom tkinter import *\nimport torch\nimport torchvision.transforms as transforms\nfrom train import SoftmaxModel\nfrom train import ConvNet\n\nWIDTH = 32 * 12 # width of the window\nHEIGHT = 32 * 6 # height of the window\n\nLINE_WIDTH = 5 # width of the pen\nLINE_RESOLUTION = 5 # minimum length of line segment (larger means more accurate angles but rougher lines)\n\nANGLE_THRESHOLD = 70 # minimum angle to begin a new phoneme\nLOOP_THRESHOLD = 2 # moving the pen this close to a coordinate already in the phoneme counts as creating a loop\nLOOP_LENGTH = 3 # minimum unwound length of a loop (in LINE_RESOLUTIONs)\n\nDATA_PATH = './data/' # default data directory\nDATA_WIDTH = 128 # width of saved training data\nDATA_HEIGHT = 128 # height of saved training data\nPHONEMES = ['ay', 'd', 'ee', 'f', 'h', 'l', 'm', 'n', 'o', 'r', 's', 't', 'v'] # possible phonemes (in data order)\n\nNET = './conv_net.pkl' # neural network to run\nDEVICE = 'cpu' # device on which to run the net\n\n# UI and phoneme recorder for Gregg recognition tool\nclass Gregg(object):\n    def __init__(self):\n        self.window = tk.Tk()\n        self.window.title('Gregg Recognition Tool')\n\n        # set up buttons\n        self.frame = Frame(self.window)\n        self.frame.pack(side=tk.TOP, fill='x')\n        self.clear_button = Button(self.frame, text='Clear', command=self.clear)\n        self.clear_button.pack(side='left')\n        self.read_button = Button(self.frame, text='Read', command=self.read)\n        self.read_button.pack(side='left')\n        self.speak_button = Button(self.frame, text='Speak', command=self.speak)\n        self.speak_button.pack(side='left')\n        self.label_button = Button(self.frame, text='Label', command=self.label_word)\n        self.label_button.pack(side='left')\n        self.phoneme_label = Label(self.frame, text='No phonemes.')\n        self.phoneme_label.pack(side='right')\n\n        # set up canvas\n        self.canvas = Canvas(self.window, bg='white', width=WIDTH, height=HEIGHT)\n        self.canvas.pack(side=tk.BOTTOM)\n        self.clear()\n\n        # bind left mouse button for drawing\n        self.canvas.bind('<Button-1>', self.mouse_down)\n        self.canvas.bind('<B1-Motion>', self.mouse_move)\n        self.canvas.bind('<ButtonRelease-1>', self.add_current_phoneme)\n\n        # bind hotkeys\n        self.window.bind('c', self.clear)\n        self.window.bind('l', self.label_word)\n\n        # get the net\n        self.net = torch.load(NET)\n        self.net.eval()\n\n        # run UI loop\n        self.window.mainloop()\n\n    # subtract two tuples\n    def sub_tuples(self, a, b):\n        return (a[0] - b[0], a[1] - b[1])\n\n    # get a phoneme based on its number\n    def get_phoneme(self, num):\n        return PHONEMES[num]\n    \n    # get the phoneme images for the current word\n    def get_images(self):\n        images = []\n\n        for index in range(len(self.phoneme_list)):\n            # get the position of the phoneme\n            phoneme_x = min(coords[0] for coords in self.phoneme_list[index])\n            phoneme_y = min(coords[1] for coords in self.phoneme_list[index])\n            phoneme_coords = (phoneme_x, phoneme_y)\n\n            # construct an image of the phoneme\n            phoneme_image = PIL.Image.new('RGB', (DATA_WIDTH, DATA_HEIGHT), color='white')\n            phoneme_draw = PIL.ImageDraw.Draw(phoneme_image)\n            first_coords = self.sub_tuples(self.phoneme_list[index][0], phoneme_coords)\n            if len(self.phoneme_list[index]) == 1:\n                phoneme_draw.ellipse([first_coords, (first_coords[0] + LINE_WIDTH, first_coords[1] + LINE_WIDTH)], fill='black', width=LINE_WIDTH)\n            phoneme_draw.line([self.sub_tuples(coords, phoneme_coords) for coords in self.phoneme_list[index]], fill='black', width=LINE_WIDTH, joint='curve')\n            images.append(phoneme_image)\n\n        return images\n\n    # clear the canvas and the current word in memory\n    def clear(self, event=None):\n        self.canvas.delete('all')\n\n        # draw grid lines\n        grid_step = HEIGHT / 4\n        grid_base = HEIGHT / 4 / 2\n        for x in range(8):\n            self.canvas.create_line(grid_base + x * grid_step, 0, grid_base + x * grid_step, HEIGHT, fill='light gray')\n        for y in range(4):\n            self.canvas.create_line(0, grid_base + y * grid_step, WIDTH, grid_base + y * grid_step, fill='light gray')\n\n        # clear phonemes\n        self.phoneme_list = []\n        self.current_phoneme = []\n\n    # use a neural net to predict the phonemes\n    def read(self):\n        # get predictions for phonemes\n        images = [transforms.ToTensor()(image).unsqueeze_(0) for image in self.get_images()]\n        outputs = [self.net(image) for image in images]\n        self.predictions = [torch.max(output, 1) for output in outputs]\n\n        # convert predictions to letters\n        self.predictions = [prediction[1].item() for prediction in self.predictions]\n        self.predictions = [self.get_phoneme(prediction) for prediction in self.predictions]\n        if self.predictions:\n            self.phoneme_label.configure(text='Phonemes: '+''.join(self.predictions))\n        else:\n            self.phoneme_label.configure(text='No phonemes.')\n\n    # speak phonemes aloud\n    def speak(self):\n        self.read()\n        try:\n            engine = pyttsx3.init()\n            engine.say(''.join(self.predictions))\n            engine.runAndWait()\n        except:\n            print('Cannot find text to speech engine.')\n\n    # create a popup window to enter training labels\n    def open_label_enter(self, label_num):\n        self.label_window = Toplevel(self.window)\n        directions_label = Label(self.label_window, text=f'Label for phoneme {label_num}:')\n        directions_label.pack(side='top')\n        self.label_entry = Entry(self.label_window)\n        self.label_entry.pack(side='top', fill='x')\n        self.label_entry.focus_set()\n        self.label_entry.bind('<Return>', lambda event, label_window=self.label_window: self.close_label_enter(label_window))\n        enter_button = Button(self.label_window, text='Enter', command=lambda:self.close_label_enter(self.label_window))\n        enter_button.pack(side='bottom')\n\n    # close the label entry popup window\n    def close_label_enter(self, window, event=None):\n        self.label = self.label_entry.get()\n        window.destroy()\n\n    # prompt the user to enter label data for each phoneme in a word\n    def label_word(self, event=None):\n        images = self.get_images()\n        for index in range(len(images)):\n            # prompt the user for a label\n            self.open_label_enter(index + 1)\n            self.window.wait_window(self.label_window)\n\n            # make sure the directory for the label exists\n            if not os.path.exists(DATA_PATH + self.label):\n                os.makedirs(DATA_PATH + self.label)\n\n            # store the image in the directory\n            filename = str(time.time()) + '.jpg'\n            images[index].save(DATA_PATH + self.label + '/' + filename)\n            print(f'saved {filename}')\n\n        self.clear()\n\n    # add the current phoneme to the phoneme list\n    def add_current_phoneme(self, event=None):\n        if self.current_phoneme and (not 
self.loop_in_phoneme or len(self.current_phoneme) > LOOP_THRESHOLD):\n self.phoneme_list.append(self.current_phoneme)\n self.current_phoneme = self.current_phoneme[-1:]\n self.draw_phonemes()\n\n # draw the current phonemes in alternating colors\n def draw_phonemes(self):\n colors = ['navy', 'blue', 'light blue']\n color_index = 0\n for phoneme in self.phoneme_list:\n color_index = (color_index + 1) % len(colors)\n for index in range(len(phoneme)):\n x_prev = phoneme[index - 1][0] if index > 0 else phoneme[index][0]\n y_prev = phoneme[index - 1][1] if index > 0 else phoneme[index][1]\n self.canvas.create_line(x_prev, y_prev, phoneme[index][0], phoneme[index][1],\n width=LINE_WIDTH, fill=colors[color_index],\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n\n # start a new phoneme when the mouse is pressed\n def mouse_down(self, event):\n self.current_phoneme = [(event.x, event.y)]\n\n # draw a dot in case the button is immediately lifted\n self.x_prev = event.x\n self.y_prev = event.y\n self.prev_dir = None\n self.since_loop = 0\n self.loop_in_phoneme = False\n self.canvas.create_line(event.x, event.y, event.x, event.y,\n width=LINE_WIDTH, fill='black',\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n\n # draw and record the coordinates of mouse movements\n def mouse_move(self, event):\n if abs(event.x - self.x_prev) > LINE_RESOLUTION or abs(event.y - self.y_prev) > LINE_RESOLUTION:\n # create new phoneme on a sharp bend\n dir = math.atan2((self.x_prev - event.x), (self.y_prev - event.y))\n dir = 180 / math.pi * -dir\n if self.prev_dir:\n diff = dir - self.prev_dir\n if diff > 180: diff -= 360 \n if diff < -180: diff += 360\n if abs(diff) >= ANGLE_THRESHOLD:\n # we've encountered a sharp bend; create a new phoneme\n self.add_current_phoneme()\n self.prev_dir = dir\n\n # check if we're in a loop (i.e. 
we see a pixel we've already written to the current phoneme)\n self.since_loop += 1\n if len(self.current_phoneme) > LOOP_LENGTH:\n for index in range(len(self.current_phoneme) - LOOP_LENGTH):\n if (abs(event.x - self.current_phoneme[index][0]) <= LOOP_THRESHOLD \n and abs(event.y - self.current_phoneme[index][1]) <= LOOP_THRESHOLD\n and self.since_loop > LOOP_LENGTH):\n self.since_loop = 0\n self.loop_in_phoneme = True\n \n # splice the current phoneme into two\n if index > LOOP_THRESHOLD:\n self.phoneme_list.append(self.current_phoneme[0:index + 1])\n self.current_phoneme = self.current_phoneme[index:]\n self.add_current_phoneme()\n self.current_phoneme = []\n return\n\n # draw the movement to the canvas and record it in the current phoneme\n self.current_phoneme.append((event.x, event.y))\n self.canvas.create_line(self.x_prev, self.y_prev, event.x, event.y,\n width=LINE_WIDTH, fill='black',\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n self.x_prev = event.x\n self.y_prev = event.y\n\n# start Gregg recognition tool\nif __name__ == '__main__':\n Gregg()\n", "repo_name": "pjhale2/gregg-recognition-tool", "sub_path": "gregg.py", "file_name": "gregg.py", "file_ext": "py", "file_size_in_byte": 11118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tkinter.Tk", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tkinter.BOTTOM", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image.Image.new", "line_number": 92, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 92, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 92, "usage_type": "name"}, {"api_name": "PIL.Image.ImageDraw.Draw", "line_number": 93, "usage_type": "call"}, {"api_name": "PIL.Image.ImageDraw", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 93, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 122, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 124, "usage_type": "call"}, {"api_name": "pyttsx3.init", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 218, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 219, "usage_type": "attribute"}]} +{"seq_id": "21733638698", "text": "import os.path as op\nimport numpy as np\nimport pytest\n\nimport fsl_mrs.utils.mrs_io as mrsio\nfrom fsl_mrs.utils.fitting import fit_FSLModel\nimport fsl_mrs.utils.quantify as quant\nfrom fsl_mrs.utils.constants import STANDARD_T1, STANDARD_T2\n\nmetabfile = op.join(op.dirname(__file__), 'testdata/quantify/Cr_10mM_test_water_scaling_WS.txt')\nh2ofile = op.join(op.dirname(__file__), 'testdata/quantify/Cr_10mM_test_water_scaling_nWS.txt')\nbasisfile = op.join(op.dirname(__file__), 'testdata/quantify/basisset_JMRUI')\n\n\ndef test_QuantificationInfo():\n qci = quant.QuantificationInfo(0.000, 40, ['Cr', 'NAA'], 298)\n assert qci.relax_corr_water_molal > 55500\n assert 
qci.relax_corr_water_molar > 55500\n\n qci = quant.QuantificationInfo(0.000, 40, ['Cr', 'NAA'], 127)\n assert qci.relax_corr_water_molal > 55500\n assert qci.relax_corr_water_molar > 55500\n\n qci = quant.QuantificationInfo(0.010, 3, ['Cr', 'NAA'], 127)\n t2s = STANDARD_T2['3T']\n t1s = STANDARD_T1['3T']\n assert np.isclose(qci.R_H2O_WM, np.exp(-0.010 / t2s['H2O_WM']) * (1 - np.exp(-3 / t1s['H2O_WM'])))\n assert np.isclose(qci.R_H2O_GM, np.exp(-0.010 / t2s['H2O_GM']) * (1 - np.exp(-3 / t1s['H2O_GM'])))\n assert np.isclose(qci.R_H2O_CSF, np.exp(-0.010 / t2s['H2O_CSF']) * (1 - np.exp(-3 / t1s['H2O_CSF'])))\n\n qci = quant.QuantificationInfo(0.010, 3, ['Cr', 'NAA'], 298)\n t2s = STANDARD_T2['7T']\n t1s = STANDARD_T1['7T']\n assert np.isclose(qci.R_H2O_WM, np.exp(-0.010 / t2s['H2O_WM']) * (1 - np.exp(-3 / t1s['H2O_WM'])))\n assert np.isclose(qci.R_H2O_GM, np.exp(-0.010 / t2s['H2O_GM']) * (1 - np.exp(-3 / t1s['H2O_GM'])))\n assert np.isclose(qci.R_H2O_CSF, np.exp(-0.010 / t2s['H2O_CSF']) * (1 - np.exp(-3 / t1s['H2O_CSF'])))\n\n assert qci.ref_metab == 'Cr'\n assert qci.ref_protons == 5\n assert qci.ref_limits == (2, 5)\n\n qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n assert qci.ref_metab == 'NAA'\n assert qci.ref_protons == 3\n assert qci.ref_limits == (1.8, 2.2)\n\n qci.set_fractions({'GM': 0.45, 'WM': 0.45, 'CSF': 0.1})\n assert qci._fractions is not None\n\n assert np.isclose(qci.csf_corr, 1 / 0.9)\n\n qci.add_corr = 5.0\n assert qci.add_corr == 5.0\n\n\ndef test_volumefraction_calc():\n qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n qci.set_fractions({'GM': 0.45, 'WM': 0.40, 'CSF': 0.15})\n assert qci.f_GM == 0.45\n assert qci.f_WM == 0.40\n assert qci.f_CSF == 0.15\n\n with pytest.warns(UserWarning):\n qci.set_fractions({'GM': 0.49, 'WM': 0.49, 'CSF': 0.0})\n\n assert qci.f_GM == 0.5\n assert qci.f_WM == 0.5\n assert qci.f_CSF == 0.0\n\n with pytest.raises(ValueError) as exc_info:\n qci.set_fractions({'GM': 0.44, 'WM': 0.40, 'CSF': 0.05})\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"fractions must be a dict containing 'WM', 'GM', 'CSF' keys\"\\\n \", and must sum to 1. 
Currently they are:\"\\\n        \" {'GM': 0.44, 'WM': 0.4, 'CSF': 0.05} (sum=0.8900).\"\n\n\ndef test_molefraction_calc():\n    qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n    qci.set_fractions({'GM': 0.45, 'WM': 0.40, 'CSF': 0.15})\n\n    # Densities are 'GM': 0.78, 'WM': 0.65, 'CSF': 0.97\n    sum_frac = (0.45 * 0.78 + 0.40 * 0.65 + 0.15 * 0.97)\n    assert np.isclose(qci.f_GM_H2O, 0.45 * 0.78 / sum_frac)\n    assert np.isclose(qci.f_WM_H2O, 0.40 * 0.65 / sum_frac)\n    assert np.isclose(qci.f_CSF_H2O, 0.15 * 0.97 / sum_frac)\n\n\ndef test_corrected_water_conc():\n    # No relaxation\n    qci = quant.QuantificationInfo(1E-10, 1E5, ['NAA'], 298)\n    qci.set_fractions({'GM': 1.00, 'WM': 0.0, 'CSF': 0.0})\n\n    print(qci.relax_corr_water_molal)\n    print(qci.relax_corr_water_molar)\n    # Molality should be close to pure water as density term cancels\n    assert np.isclose(qci.relax_corr_water_molal, 55510)\n    # Molarity should be scaled by density term as volume fixed\n    assert np.isclose(qci.relax_corr_water_molar, 55510 * 0.78)\n\n    qci.set_fractions({'GM': 0.50, 'WM': 0.5, 'CSF': 0.0})\n\n    print(qci.relax_corr_water_molal)\n    print(qci.relax_corr_water_molar)\n    # Molality should be close to pure water as density term cancels\n    assert np.isclose(qci.relax_corr_water_molal, 55510)\n    # Molarity should be scaled by density terms as volume fixed\n    assert np.isclose(qci.relax_corr_water_molar, 55510 * (0.78 + 0.65) / 2)\n\n    qci = quant.QuantificationInfo(1E-10, 1, ['NAA'], 298)\n    qci.set_fractions({'GM': 0.50, 'WM': 0.5, 'CSF': 0.0})\n\n    print(qci.relax_corr_water_molal)\n    print(qci.relax_corr_water_molar)\n    # Molality should be scaled by relaxation terms in proportion to the mole fraction.\n    mf_gm = 0.5 * 0.78 / (0.5 * 0.78 + 0.5 * 0.65)\n    mf_wm = 0.5 * 0.65 / (0.5 * 0.78 + 0.5 * 0.65)\n    assert np.isclose(qci.relax_corr_water_molal, 55510 * (qci.R_H2O_GM * mf_gm + qci.R_H2O_WM * mf_wm))\n    # Molarity should be scaled by density terms * relaxation terms\n    assert np.isclose(qci.relax_corr_water_molar, 55510 * (0.78 * qci.R_H2O_GM + 0.65 * qci.R_H2O_WM) / 2)\n\n\ndef test_quantifyWater():\n    basis = mrsio.read_basis(basisfile)\n    data = mrsio.read_FID(metabfile)\n    dataw = mrsio.read_FID(h2ofile)\n\n    mrs = data.mrs(basis=basis,\n                   ref_data=dataw)\n    mrs.keep = ['Cr']\n    mrs.check_FID(repair=True)\n    mrs.check_Basis(repair=True)\n\n    Fitargs = {'ppmlim': [0.2, 5.2],\n               'method': 'MH',\n               'baseline_order': 0,\n               'metab_groups': [0]}\n\n    res = fit_FSLModel(mrs, **Fitargs)\n\n    tissueFractions = {'GM': 0.6, 'WM': 0.4, 'CSF': 0.0}\n    TE = 0.03\n    TR = 20\n    T2dict = {'H2O_GM': 0.110,\n              'H2O_WM': 0.080,\n              'H2O_CSF': 2.55,\n              'METAB': 0.160}\n\n    q_info = quant.QuantificationInfo(\n        TE,\n        TR,\n        mrs.names,\n        mrs.centralFrequency / 1E6,\n        t2=T2dict)\n\n    q_info.set_fractions(tissueFractions)\n\n    res.calculateConcScaling(mrs,\n                             q_info,\n                             internal_reference=['Cr'],\n                             verbose=True)\n\n    print(res.getConc(scaling='raw'))\n    print(res.getConc(scaling='internal'))\n    print(res.getConc(scaling='molality'))\n    print(res.getConc(scaling='molarity'))\n\n    assert np.allclose(res.getConc(scaling='internal'), 1.0)\n    assert np.allclose(res.getConc(scaling='molarity'), 10.78, atol=3E-1)\n    assert np.allclose(res.getConc(scaling='molality'), 10.78 * 1 / (0.6 * 0.78 + 0.4 * 0.65), atol=3E-1)\n", "repo_name": "wtclarke/fsl_mrs", "sub_path": "fsl_mrs/tests/test_utils_quantify.py", "file_name": "test_utils_quantify.py", "file_ext": "py", "file_size_in_byte": 6508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "61", "api": 
[{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 16, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 16, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 20, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 20, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 24, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 24, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.constants.STANDARD_T2", "line_number": 25, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.constants.STANDARD_T1", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 31, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 31, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.constants.STANDARD_T2", "line_number": 32, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.constants.STANDARD_T1", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 42, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 50, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 57, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 57, "usage_type": "name"}, {"api_name": "pytest.warns", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 70, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 80, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 87, "usage_type": "call"}, 
{"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 92, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 109, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 111, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 111, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 121, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.mrs_io.read_basis", "line_number": 125, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.mrs_io", "line_number": 125, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.mrs_io.read_FID", "line_number": 126, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.mrs_io", "line_number": 126, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.mrs_io.read_FID", "line_number": 127, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.mrs_io", "line_number": 127, "usage_type": "name"}, {"api_name": "fsl_mrs.utils.fitting.fit_FSLModel", "line_number": 140, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify.QuantificationInfo", "line_number": 150, "usage_type": "call"}, {"api_name": "fsl_mrs.utils.quantify", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "26158863484", "text": "\"\"\"S3 Connector module.\"\"\"\n\n###############################################################################\n# IMPORTS ########################################################### IMPORTS #\n###############################################################################\n\n# Standard library\nimport dataclasses\nimport logging\nimport traceback\n\n# Installed\nimport boto3\nimport botocore\n\n# Own modules\nimport dds_cli.utils\nfrom dds_cli import DDSEndpoint\n\n###############################################################################\n# LOGGING ########################################################### LOGGING #\n###############################################################################\n\nLOG = logging.getLogger(__name__)\n\n###############################################################################\n# CLASSES ########################################################### CLASSES #\n###############################################################################\n\n\n@dataclasses.dataclass\nclass S3Connector:\n \"\"\"Connect to Simple Storage Service.\"\"\"\n\n project_id: dataclasses.InitVar[str]\n token: dataclasses.InitVar[dict]\n safespring_project: str = dataclasses.field(init=False)\n keys: dict = dataclasses.field(init=False)\n url: str = dataclasses.field(init=False)\n bucketname: str = dataclasses.field(init=False)\n resource = None\n\n def __post_init__(self, project_id, token):\n \"\"\"Initiate S3Connector object by getting s3 info from API.\"\"\"\n (\n self.safespring_project,\n self.keys,\n self.url,\n self.bucketname,\n ) = self.__get_s3_info(project_id=project_id, token=token)\n\n # @connect_cloud\n def __enter__(self):\n \"\"\"Enter 
context.\"\"\"\n self.resource = self.connect()\n\n return self\n\n def __exit__(self, exc_type, exc_value, traceb):\n \"\"\"Close context manager, incl. connection.\"\"\"\n if exc_type is not None:\n traceback.print_exception(exc_type, exc_value, traceb)\n return False # uncomment to pass exception through\n\n return True\n\n def connect(self):\n \"\"\"Connect to S3 resource.\"\"\"\n # Connect to service\n try:\n session = boto3.session.Session()\n\n resource = session.resource(\n service_name=\"s3\",\n endpoint_url=self.url,\n aws_access_key_id=self.keys[\"access_key\"],\n aws_secret_access_key=self.keys[\"secret_key\"],\n )\n except (boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError) as err:\n LOG.warning(\"S3 connection failed: %s\", err)\n raise\n\n LOG.debug(\"Connected to S3.\")\n return resource\n\n # Static methods ############ Static methods #\n @staticmethod\n def __get_s3_info(project_id, token):\n \"\"\"Get information required to connect to cloud.\"\"\"\n # Perform request to API\n s3info, _ = dds_cli.utils.perform_request(\n DDSEndpoint.S3KEYS,\n method=\"get\",\n params={\"project\": project_id},\n headers=token,\n error_message=\"Failed to get cloud information\",\n )\n\n # Get s3 info\n\n safespring_project, keys, url, bucket = (\n s3info.get(\"safespring_project\"),\n s3info.get(\"keys\"),\n s3info.get(\"url\"),\n s3info.get(\"bucket\"),\n )\n if None in [safespring_project, keys, url, bucket]:\n raise SystemExit(\"Missing safespring information in response.\") # TODO: change\n\n return safespring_project, keys, url, bucket\n", "repo_name": "ScilifelabDataCentre/dds_cli", "sub_path": "dds_cli/s3_connector.py", "file_name": "s3_connector.py", "file_ext": "py", "file_size_in_byte": 3613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "dataclasses.InitVar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "dataclasses.InitVar", "line_number": 36, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 37, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 38, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 39, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 40, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 62, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 71, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 71, "usage_type": "attribute"}, {"api_name": "boto3.exceptions", "line_number": 79, "usage_type": "attribute"}, {"api_name": "botocore.exceptions", "line_number": 79, "usage_type": "attribute"}, {"api_name": "dds_cli.utils.utils.perform_request", "line_number": 91, "usage_type": "call"}, {"api_name": "dds_cli.utils.utils", "line_number": 91, "usage_type": "attribute"}, {"api_name": "dds_cli.utils", "line_number": 91, "usage_type": "name"}, {"api_name": "dds_cli.DDSEndpoint.S3KEYS", "line_number": 92, "usage_type": "attribute"}, {"api_name": "dds_cli.DDSEndpoint", "line_number": 92, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "28419865102", "text": "# -*- coding:utf-8 -*-\nimport argparse\nimport os\nfrom datetime import datetime\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom math import ceil\n#from scipy import 
interpolate\nimport cv2\n\n# PyTorch\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torchvision # image processing\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom tensorboardX import SummaryWriter\n\n# custom classes\nfrom networks import ProgressiveGenerator, ProgressiveDiscriminator\nfrom utils import save_checkpoint, load_checkpoint\nfrom utils import board_add_image, board_add_images\nfrom utils import save_image_historys_gif\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--exper_name\", default=\"PGGAN_train\", help=\"experiment name\")\n    parser.add_argument('--device', choices=['cpu', 'gpu'], default=\"gpu\", help=\"device to use (CPU or GPU)\")\n    #parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') \n    parser.add_argument('--dataset', choices=['mnist', 'cifar-10'], default=\"mnist\", help=\"dataset type (MNIST or CIFAR-10)\")\n    parser.add_argument('--dataset_dir', type=str, default=\"dataset\", help=\"dataset directory\")\n    parser.add_argument('--results_dir', type=str, default=\"results\", help=\"output directory for generated images\")\n    parser.add_argument('--load_checkpoints_dir', type=str, default=\"\", help=\"directory to load the model from\")\n    parser.add_argument('--n_samplings', type=int, default=100, help=\"number of samplings\")\n    parser.add_argument('--batch_size', type=int, default=63, help=\"batch size\")\n    parser.add_argument(\"--init_image_size\", type = int, default = 4 )\n    parser.add_argument(\"--final_image_size\", type = int, default = 32 )\n    parser.add_argument('--n_input_noize_z', type=int, default=128, help=\"dimension of the noise z fed to the generator\")\n    parser.add_argument(\"--fps\", type=float, default=30.0, help=\"FPS of the morphing video\")\n    parser.add_argument('--codec', choices=['mp4','gif'], default=\"mp4\", help=\"video codec\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"random seed value\")\n    parser.add_argument('--debug', action='store_true', help=\"enable debug mode\")\n    args = parser.parse_args()\n\n    # print the run conditions\n    print( \"----------------------------------------------\" )\n    print( \"Run conditions\" )\n    print( \"----------------------------------------------\" )\n    print( \"Start time:\", datetime.now() )\n    print( \"PyTorch version :\", torch.__version__ )\n    for key, value in vars(args).items():\n        print('%s: %s' % (str(key), str(value)))\n\n    # set up the execution device\n    if( args.device == \"gpu\" ):\n        use_cuda = torch.cuda.is_available()\n        if( use_cuda == True ):\n            device = torch.device( \"cuda\" )\n            #torch.cuda.set_device(args.gpu_ids[0])\n            print( \"Device :\", device)\n            print( \"GPU name :\", torch.cuda.get_device_name(device))\n            print(\"torch.cuda.current_device() =\", torch.cuda.current_device())\n        else:\n            print( \"can't use gpu.\" )\n            device = torch.device( \"cpu\" )\n            print( \"Device :\", device)\n    else:\n        device = torch.device( \"cpu\" )\n        print( \"Device :\", device)\n\n    print('-------------- End ----------------------------')\n\n    # output directories\n    if not( os.path.exists(args.results_dir) ):\n        os.mkdir(args.results_dir)\n    if not( os.path.exists(os.path.join(args.results_dir, args.exper_name)) ):\n        os.mkdir( os.path.join(args.results_dir, args.exper_name) )\n\n    # fix the random seed\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n\n    #======================================================================\n    # load or generate the dataset\n    # data preprocessing\n    #======================================================================\n    pass\n\n    #======================================================================\n    # define the model architecture\n    #======================================================================\n    # Generator\n    if( args.dataset == \"mnist\" ):\n        model_G = ProgressiveGenerator(\n            init_image_size = args.init_image_size,\n            final_image_size = args.final_image_size,\n            n_input_noize_z = args.n_input_noize_z,\n            n_rgb = 1,\n        ).to( device )\n    else:\n        model_G = ProgressiveGenerator(\n            init_image_size = args.init_image_size,\n            final_image_size = args.final_image_size,\n            n_input_noize_z = args.n_input_noize_z,\n            n_rgb = 3,\n        ).to( device )\n\n    # Discriminator\n    if( args.dataset == \"mnist\" ):\n        model_D = ProgressiveDiscriminator(\n            init_image_size = args.init_image_size,\n            final_image_size = args.final_image_size,\n            n_fmaps = args.n_input_noize_z,\n            n_rgb = 1,\n        ).to( device )\n    else:\n        model_D = ProgressiveDiscriminator( \n            init_image_size = args.init_image_size,\n            final_image_size = args.final_image_size,\n            n_fmaps = args.n_input_noize_z,\n            n_rgb = 3,\n        ).to( device )\n    \n    if( args.debug ):\n        print( \"model_G :\\n\", model_G )\n        print( \"model_D :\\n\", model_D )\n\n    # load the models\n    if not args.load_checkpoints_dir == '' and os.path.exists(args.load_checkpoints_dir):\n        init_step = load_checkpoint(model_G, device, os.path.join(args.load_checkpoints_dir, \"G\", \"G_final.pth\") )\n        init_step = load_checkpoint(model_D, device, os.path.join(args.load_checkpoints_dir, \"D\", \"D_final.pth\") )\n\n    #======================================================================\n    # model inference\n    #======================================================================\n    # input noise z\n    input_noize_z1 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n    input_noize_z2 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n    #input_noize_z3 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n    #input_noize_z4 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n    #input_noize_z5 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n\n    #\n    final_progress = float(np.log2(args.final_image_size)) -2\n\n    #======================================================================\n    # morphing (z1 -> z2)\n    #======================================================================\n    input_noize_z_src = input_noize_z1\n    input_noize_z_target = input_noize_z2\n    dz = ( input_noize_z_target - input_noize_z_src ) / args.n_samplings\n    if( args.debug ):\n        print( \"input_noize_z_src[0,0:10,0,0]\", input_noize_z_src[0,0:10,0,0] )\n        print( \"input_noize_z_target[0,0:10,0,0]\", input_noize_z_target[0,0:10,0,0] )\n        print( \"dz[0,0:10,0,0]\", dz[0,0:10,0,0] )\n\n    print(\"Starting Test Loop...\")\n    n_print = 1\n    model_G.eval()\n    model_D.eval()\n    for step in tqdm( range(args.n_samplings+1), desc = \"Samplings\" ):\n        # linearly interpolate the input noise\n        input_noize_z = input_noize_z_src + step * dz\n\n        # inference with the generator G\n        with torch.no_grad():\n            # G(z) : fake image output by the generator\n            G_z = model_G( input_noize_z, final_progress )\n\n        # generate & save the output image\n        save_image( tensor = G_z, filename = os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format( step ) )\n        n_print -= 1\n\n    print(\"Finished Test Loop.\")\n\n    #======================================================================\n    # turn the generated frames into a video\n    #======================================================================\n    if( args.codec == \"gif\"):\n        frames = []\n        for step in range(args.n_samplings+1):\n            img = Image.open( 
os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format(step) )\n frames.append(img)\n\n frames[0].save(\n os.path.join(args.results_dir, args.exper_name) + \"/morphing_video.gif\",\n save_all=True,\n append_images=frames[1:]\n )\n\n else:\n img = cv2.imread(os.path.join(args.results_dir, args.exper_name) + \"/frame_0000.png\" )\n width, height = img.shape[1], img.shape[0]\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n video = cv2.VideoWriter( \n os.path.join(args.results_dir, args.exper_name) + \"/morphing_video.mp4\", \n fourcc, args.fps, \n (width, height)\n )\n\n for step in range(args.n_samplings+1):\n img = cv2.imread(os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format(step))\n video.write(img)\n\n video.release()\n", "repo_name": "Yagami360/MachineLearning_Exercises_Python_PyTorch", "sub_path": "GAN_PGGAN_PyTorch/test_morphing.py", "file_name": "test_morphing.py", "file_ext": "py", "file_size_in_byte": 9260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.__version__", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.cuda.get_device_name", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.cuda.current_device", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 86, "usage_type": "call"}, {"api_name": "networks.ProgressiveGenerator", "line_number": 99, "usage_type": "call"}, {"api_name": "networks.ProgressiveGenerator", "line_number": 106, "usage_type": "call"}, {"api_name": "networks.ProgressiveDiscriminator", "line_number": 115, "usage_type": "call"}, {"api_name": "networks.ProgressiveDiscriminator", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "utils.load_checkpoint", "line_number": 135, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "utils.load_checkpoint", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 149, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 171, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 187, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 187, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}]} +{"seq_id": "17776785954", "text": "import os\n# source: https://sabuhish.github.io/fastapi-mail/example/\n\nfrom fastapi_mail import FastMail, MessageSchema, ConnectionConfig\nfrom starlette.responses import JSONResponse\n#from starlette.requests import Request\nfrom fastapi_mail import FastMail, MessageSchema, ConnectionConfig\nfrom pydantic import EmailStr\nfrom typing import List\nfrom utils import get_env_variable\n#from fastapi_mail.email_utils import DefaultChecker\n\ndef mail_service_conf():\n return ConnectionConfig(\n MAIL_USERNAME = get_env_variable(\"MAIL_USERNAME\"),\n MAIL_PASSWORD = get_env_variable(\"MAIL_PASSWORD\"),\n MAIL_FROM = get_env_variable(\"MAIL_USERNAME\"),\n MAIL_PORT = 587,\n MAIL_SERVER = \"smtp.gmail.com\",\n MAIL_FROM_NAME=\"Packer Solver\",\n MAIL_TLS = True,\n MAIL_SSL = False,\n USE_CREDENTIALS = True,\n VALIDATE_CERTS = True\n )\n\nasync def send_test_email(recipients: List[EmailStr]) -> JSONResponse:\n try:\n message = MessageSchema(subject=\"fastapi_mail Test\", recipients=recipients, body=\"Hello!\")\n fm = FastMail(mail_service_conf())\n await fm.send_message(message)\n return JSONResponse(status_code=200, content={\"message\": \"email has been sent\"})\n except Exception as ex:\n return JSONResponse(status_code=400, content={\"message\": f\"{ex}\"})\n\nasync def send_to_one(recipient: EmailStr, subject: str, body: str) -> JSONResponse:\n try:\n message 
= MessageSchema(\n subject = subject,\n recipients = [recipient],\n body = body\n )\n\n fm = FastMail(mail_service_conf())\n await fm.send_message(message)\n return JSONResponse(status_code=200, content={\"message\": f\"email has been sent to {recipient}\"})\n\n except Exception as ex:\n return JSONResponse(status_code=400, content={\"message\": f\"{ex}\"})\n", "repo_name": "urmaspitsi/PackerUserManagement", "sub_path": "emailer.py", "file_name": "emailer.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi_mail.ConnectionConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.get_env_variable", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.get_env_variable", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.get_env_variable", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "pydantic.EmailStr", "line_number": 27, "usage_type": "name"}, {"api_name": "fastapi_mail.MessageSchema", "line_number": 29, "usage_type": "call"}, {"api_name": "fastapi_mail.FastMail", "line_number": 30, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 27, "usage_type": "name"}, {"api_name": "pydantic.EmailStr", "line_number": 36, "usage_type": "name"}, {"api_name": "fastapi_mail.MessageSchema", "line_number": 38, "usage_type": "call"}, {"api_name": "fastapi_mail.FastMail", "line_number": 44, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "6772135836", "text": "#(c) 2012 Massachusetts Institute of Technology. All Rights Reserved\n# Code written by: Maksim Imakaev (imakaev@mit.edu)\n\n\"\"\"\nSome important utilities from Max. 
This includes:\n\nSet exception hook to pdb\nRun in separate process\nfork-map\nfork-map-reduce\nfork-map-average\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport sys\nimport pickle\nimport pdb\nimport traceback\nimport warnings\nimport subprocess\nimport numpy as np\nfrom copy import copy\nimport logging\nfrom functools import reduce\nlog = logging.getLogger(__name__)\n\n\ndef commandExists(command):\n \"checks if the bash command exists\"\n command = command.split()[0]\n if subprocess.call(['which', command]) != 0:\n return False\n return True\n\ndef gzipWriter(filename, pigzArguments=(\"-4\",)):\n \"\"\"\n creates a writing process with gzip or parallel gzip (pigz) attached to it\n \"\"\"\n filename = os.path.abspath(filename)\n with open(filename, 'wb') as outFile:\n if commandExists(\"pigz\"):\n writer = [\"pigz\", \"-c\"] + list(pigzArguments)\n else:\n writer = [\"gzip\", \"-c\", \"-1\"]\n warnings.warn(\"Please install 'pigz' parallel gzip for faster speed\")\n\n pwrite = subprocess.Popen(writer, stdin=subprocess.PIPE, stdout=outFile, shell=False, bufsize=-1)\n log.info(\"\"\"Writer created with command \"{0}\" \"\"\".format(writer))\n return pwrite\n\ndef _exceptionHook(infoType, value, tb):\n \"Exception hook\"\n traceback.print_exception(infoType, value, tb)\n print()\n pdb.post_mortem(tb)\n\n\ndef setExceptionHook():\n \"sets exception hook to pdb\"\n sys.excepthook = _exceptionHook\n\n\nclass transparentDict(dict): # transparent dictionary, that returns the key\n def __missing__(self, key):\n return key\n\n\ndef run_in_separate_process(func, *args, **kwds):\n pread, pwrite = os.pipe()\n pid = os.fork()\n if pid > 0:\n os.close(pwrite)\n with os.fdopen(pread, 'rb') as f:\n status, result = pickle.load(f)\n os.waitpid(pid, 0)\n if status == 0:\n return result\n else:\n raise result\n else:\n os.close(pread)\n try:\n result = func(*args, **kwds)\n status = 0\n except Exception as exc:\n result = exc\n status = 1\n with os.fdopen(pwrite, 'wb') as f:\n try:\n pickle.dump((status, result), f, pickle.HIGHEST_PROTOCOL)\n except pickle.PicklingError as exc:\n pickle.dump((2, exc), f, pickle.HIGHEST_PROTOCOL)\n os._exit(0)\n\n\ndef deprecate(newFunction, oldFunctionName=None, message=None):\n \"\"\"If you rename your function, you can use this to issue deprecation warning for the old name\n Juse use newFunction = deprecate(oldFunction)\"\"\"\n try:\n newName = newFunction.__name__\n except:\n newName = \"_UndeterminedName_\"\n if oldFunctionName is None:\n oldFunctionName = \"_UnspecifiedName_\"\n if message == None:\n message = \"Function %s was renamed to %s\" % (\n oldFunctionName, newName)\n\n def oldFunction(*args, **kwargs):\n warnings.warn(message)\n return newFunction(*args, **kwargs)\n return oldFunction\n\ndef _nprocessors():\n if sys.platform == 'darwin':\n try:\n from multiprocessing import cpu_count\n return cpu_count()\n except NotImplementedError:\n pass\n else:\n # Cygwin (Windows) and Linuxes\n # Could try sysconf(_SC_NPROCESSORS_ONLN) (LSB) next. 
Instead, count processors in cpuinfo.\n try:\n s = open('/proc/cpuinfo', 'r').read()\n return s.replace(' ', '').replace('\\t', '').count('processor:')\n except:\n pass\n return 4\n\nnproc = _nprocessors()\n\ndef fmap(f, *a, **kw):\n \"\"\"\n forkmap.map(..., n=nprocessors), same as map(...).\n n must be a keyword arg; default n is number of physical processors.\n \"\"\"\n n = max([kw.get(i, 0) for i in ['n','N', \"nproc\", \"Nproc\", \"NProc\"]])\n if n == 0:\n n = nproc\n\n if n == 1:\n return list(map(f, *a))\n\n L = list(zip(*a))\n n = min(n, len(L))\n\n ans = [None] * len(L)\n pipes = [os.pipe() for i in range(n - 1)]\n\n for i in range(n):\n if i < n - 1 and not os.fork(): # Child, and not last processor\n try:\n try:\n obj = [f(*x) for x in L[i::n]]\n except Exception as obj:\n pass\n with os.fdopen(pipes[i][1],'wb') as f:\n pickle.dump(obj,f, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n traceback.print_exc()\n finally:\n os._exit(0)\n elif i == n - 1: # parent\n try:\n ans[i::n] = [f(*x) for x in L[i::n]]\n for k in range(n - 1):\n with os.fdopen(pipes[k][0],'rb') as f:\n obj = pickle.load(f)\n if isinstance(obj, Exception):\n raise obj\n ans[k::n] = obj\n finally:\n for j in range(n - 1):\n os.wait()\n return ans\n\n\n\n\ndef _testFmap():\n\n for i in range(1, 300):\n print(i)\n a = list(range(i))\n for j in range(1, 10):\n b = fmap(lambda x:x, a, n=j)\n assert (np.array(a) == np.array(b)).all()\n\ndef _fmapredcount(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"fork-map-reduce\n Performs fork-map of function on data, automatically reducing the data inside each worker.\n If evaluation throws the exception from exceptionList, this results are simply ignored\n \"\"\"\n def funsum(x, y):\n \"\"\"reduces two x[0],y[0], keeping track of # of\n successful evaluations that were made\n Also keeps track of None's that can occur if evaluation failed\"\"\"\n if x is None:\n if y is None:\n return None\n else:\n return y\n else:\n if y is None:\n return x\n else:\n return (reduction(x[0], y[0]), x[1] + y[1])\n\n def newfunction(x):\n try:\n \"if function is evaluated, it was evaluated one time\"\n return function(x), 1\n except tuple(exceptionList):\n return None\n\n if len(data) < n:\n n = len(data)\n datas = []\n\n for i in range(n):\n datas.append(copy(data[i::n])) # split like that if beginning and end of the array have different evaluation time\n\n def worker(dataList):\n dataList[0] = newfunction(dataList[0])\n return reduce(lambda z, y: funsum(z, newfunction(y)), dataList) # reducing newfunction with our new reduction algorithm\n\n reduced = fmap(worker, datas, n=n)\n return reduce(funsum, reduced)\n\n\ndef fmapred(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"reduces two x[0],y[0], keeping track of # of\n successful evaluations that were made\n Also ignores failed evaluations with exceptions from exceptionList.\n\n Parameters\n ----------\n function : function\n function to be applied to the data\n data : iterable\n input data\n reduction : function, optional\n Reduction function. By default - sum\n n : int, optional\n number of CPUs\n exceptionList : list, optional\n list of exceptions to be ignored during reduction. 
By default, only IOError is ignored.\n \"\"\"\n return _fmapredcount(function, data, reduction=reduction, n=n, exceptionList=exceptionList)[0]\n\n\ndef fmapav(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"Calculates the average of [function(i) for i in data]\n Also ignores failed evaluations with exceptions from exceptionList.\n\n Parameters\n ----------\n function : function\n function to be applied to the data\n data : iterable\n input data\n reduction : function, optional\n Reduction function. By default - sum\n n : int, optional\n number of CPUs\n exceptionList : list, optional\n list of exceptions to be ignored during reduction. By default, only IOError is ignored.\n \"\"\"\n\n a = _fmapredcount(function, data, reduction=reduction, n=n,\n exceptionList=exceptionList)\n return a[0] / float(a[1])\n", "repo_name": "wangyibin/TDGP", "sub_path": "apps/systemutils.py", "file_name": "systemutils.py", "file_ext": "py", "file_size_in_byte": 8407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 45, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 47, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "traceback.print_exception", "line_number": 53, "usage_type": "call"}, {"api_name": "pdb.post_mortem", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.pipe", "line_number": 69, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 70, "usage_type": "call"}, {"api_name": "os.close", "line_number": 72, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 73, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 74, "usage_type": "call"}, {"api_name": "os.waitpid", "line_number": 75, "usage_type": "call"}, {"api_name": "os.close", "line_number": 81, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 88, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 90, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pickle.PicklingError", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 92, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 93, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 110, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 115, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 118, "usage_type": "call"}, {"api_name": "os.pipe", "line_number": 149, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 152, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 158, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 159, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 159, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 161, "usage_type": "call"}, 
{"api_name": "os._exit", "line_number": 163, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 168, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 169, "usage_type": "call"}, {"api_name": "os.wait", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 222, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 226, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "8967127292", "text": "\"\"\"resume_builder URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom resume_builderApp import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name='home'),\n path('person_create/', views.person_create, name='person_create'),\n path('projectorjob_create/', views.projectorjob_create, name='projectorjob_create'),\n path('areaofinterest_create/', views.areaofinterest_create, name='areaofinterest_create'),\n path('academicform_create/', views.academicform_create, name='academicform_create'),\n path('educationform_create/', views.educationform_create, name='educationform_create'),\n path('professionalskill_create/', views.professionalskill_create, name='professionalskill_create'),\n path('resume/', views.view, name='view'),\n path('download//', views.resumes, name='download'),\n]\n", "repo_name": "rabhi1611/Resume-Builder-Django", "sub_path": "resume_builder/resume_builder/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "resume_builderApp.views.home", "line_number": 22, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "resume_builderApp.views.person_create", "line_number": 23, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "resume_builderApp.views.projectorjob_create", "line_number": 24, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "resume_builderApp.views.areaofinterest_create", "line_number": 25, "usage_type": "attribute"}, 
{"api_name": "resume_builderApp.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "resume_builderApp.views.academicform_create", "line_number": 26, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "resume_builderApp.views.educationform_create", "line_number": 27, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "resume_builderApp.views.professionalskill_create", "line_number": 28, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "resume_builderApp.views.view", "line_number": 29, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "resume_builderApp.views.resumes", "line_number": 30, "usage_type": "attribute"}, {"api_name": "resume_builderApp.views", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "264323615", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#export OPENAI_API_KEY=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\n\nimport re\nimport requests\nfrom bs4 import BeautifulSoup # type: ignore\n\n# write a function that can extract text from a list of URLs and returns a text documents per URL\ndef get_documents(urls: list) -> dict[str, str]:\n \"\"\"Extract text from a list of URLs and returns a text documents per URL\n\n Args:\n urls (list): list of URLs\n\n Returns:\n list: list of text documents\n \"\"\"\n documents = {}\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',\n 'Accept-Language': 'en-US,en;q=0.9',\n # Add more headers if necessary\n }\n for url in urls:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n # Extract the text content from the response\n # soup = BeautifulSoup(response.content, 'html.parser')\n text = extract_text_with_links(response.content)\n # text = soup.get_text()\n # text = re.sub(r'\\n{2,}', '\\n', text)\n documents[url] = text\n elif response.status_code == 403:\n print(f\"403 Forbidden: Access to the webpage is restricted. 
{response.reason}\")\n else:\n print(f\"Error: {response.status_code} - Unable to access the webpage.\")\n return documents\n\n\ndef extract_text_with_links(html):\n # Create a BeautifulSoup object\n soup = BeautifulSoup(html, 'html.parser')\n\n # Remove unwanted tags\n unwanted_tags = ['script', 'style']\n for tag in soup.find_all(unwanted_tags):\n tag.decompose()\n\n # Process specific tags\n for a in soup.find_all('a'):\n text = a.get_text(strip=True)\n href = a.get('href')\n if text and href:\n a.string = f'{text} ({href})'\n\n for br in soup.find_all('br'):\n br.insert_after('\\n')\n\n for p in soup.find_all('p'):\n p.insert_after('\\n\\n')\n for child in p.find_all(recursive=False):\n if child.name != 'br':\n child.insert_before(' ')\n\n for heading in soup.find_all(re.compile('^h[1-6]$')):\n heading.insert_after('\\n\\n')\n\n # Extract text\n text = soup.get_text(separator=' ')\n\n text = re.sub(r'\\n+', '\\n', text.strip())\n text = re.sub(r'\\n\\s+', '\\n', text.strip())\n # text = re.sub(r'\\s+', ' ', text)\n\n return text\n", "repo_name": "itissid/Drop-PoT", "sub_path": "src/drop_backend/utils/scraping.py", "file_name": "scraping.py", "file_ext": "py", "file_size_in_byte": 2495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 66, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 72, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "34134124442", "text": "import glob\nfrom tqdm import tqdm\nimport os\nimport fire\n\nfrom Rignak_Misc.path import create_path\n\n# python divide_dataset.py E:\\datasets\\waifu2latent\n\ndef divide_dataset(folder, train_to_val_ratio=10):\n labels = [os.path.join(folder, subfolder) for subfolder in os.listdir(folder) if os.path.isdir(os.path.join(folder, subfolder))]\n for label in tqdm(labels):\n train_folder = os.path.join(folder, 'train', os.path.split(label)[-1])\n val_folder = os.path.join(folder, 'val', os.path.split(label)[-1])\n create_path(train_folder)\n create_path(val_folder)\n for i, filename in tqdm(enumerate(glob.glob(os.path.join(label, '*.png')))):\n new_filename = os.path.join(train_folder, os.path.split(filename)[-1]) if i % train_to_val_ratio else os.path.join(val_folder, os.path.split(filename)[-1])\n os.rename(filename, new_filename)\n\n\nif __name__ == '__main__':\n fire.Fire(divide_dataset)\n", "repo_name": "AurelienColin/ImageProcessing", "sub_path": "divide_dataset.py", "file_name": "divide_dataset.py", "file_ext": "py", "file_size_in_byte": 945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 11, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 14, "usage_type": "call"}, {"api_name": "Rignak_Misc.path.create_path", "line_number": 15, "usage_type": "call"}, {"api_name": "Rignak_Misc.path.create_path", "line_number": 16, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 17, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 18, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 19, "usage_type": "call"}, {"api_name": "fire.Fire", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "5007923421", "text": "from django.test import TestCase\nfrom django.test import TestCase\nfrom ..models.habitacion import Habitacion\nfrom ..models.reserva import Reserva\nfrom ..forms.reserva_form import ReservaFechasForm, ReservaHabitacionForm\n\n\nclass ReservaTestCase(TestCase):\n def setUp(self) -> None:\n Habitacion.objects.create(tipo=\"Individual\", precio=25, n_habitaciones=6, capacidad=1)\n h_doble = Habitacion.objects.create(tipo=\"Doble\", precio=46, n_habitaciones=15, capacidad=2)\n\n Reserva.objects.create(localizador=\"LOCALIZADOR\", fecha_entrada=\"2020-06-25\", fecha_salida=\"2020-06-30\",\n tipo_habitacion=h_doble, n_huespedes=1, huesped_nombre=\"Nombre\", huesped_email=\"Email@email.com\",\n huesped_telefono=\"9595959595\", precio_total=230.0)\n\n\n def test_lista_reservas(self):\n \"\"\"\n Test para comprobar que la BD funciona correctamente.\n Se comprueba que exista al menos una reserva en la BD\n \"\"\"\n self.assertEqual(Reserva.objects.all().count(), 1)\n\n \n def test_lista_habitaciones(self):\n \"\"\"\n Test para comprobar que la BD funciona correctamente.\n Se comprueba que existan al menos dos habitaciones en la BD\n \"\"\"\n self.assertEqual(Habitacion.objects.all().count(), 2)\n\n\n def test_form_fechas(self):\n \"\"\"\n Test para comprobar que el formulario para introducir los datos de reserva funciona correctamente.\n \"\"\"\n form_correcto = ReservaFechasForm(data={\"fecha_entrada\": \"25/06/2020\", \"fecha_salida\": \"30/06/2020\", \"n_huespedes\": 1})\n self.assertEqual(form_correcto.is_valid(), True)\n\n form_huespedes_incorrectos = ReservaFechasForm(data={\"fecha_entrada\": \"25/06/2020\", \"fecha_salida\": \"30/06/2020\", \"n_huespedes\": -1})\n self.assertEqual(form_huespedes_incorrectos.is_valid(), False)\n\n form_fechas_incorrectos = ReservaFechasForm(data={\"fecha_entrada\": \"30/06/2020\", \"fecha_salida\": \"25/06/2020\", \"n_huespedes\": 1})\n self.assertEqual(form_fechas_incorrectos.is_valid(), False)\n\n\n def test_form_reserva_habitacion(self):\n \"\"\"\n Test para comprobar que el formulario para introducir los datos del huesped funciona correctamente.\n \"\"\"\n form_correcto = ReservaHabitacionForm(data={\"nombre\": \"Nombre\", \"email\": \"email@email.com\", \"telefono\": \"+34 694-35-36-65\"})\n self.assertEqual(form_correcto.is_valid(), True)\n\n form_email_incorrecto = ReservaHabitacionForm(data={\"nombre\": \"Nombre\", \"email\": \"emailemail.com\", \"telefono\": \"+34 694-35-36-65\"})\n self.assertEqual(form_email_incorrecto.is_valid(), False)\n\n form_telefono_incorrecto = ReservaHabitacionForm(data={\"nombre\": 
\"Nombre\", \"email\": \"emailemail.com\", \"telefono\": \"+34 asd\"})\n self.assertEqual(form_telefono_incorrecto.is_valid(), False)\n\n", "repo_name": "joseka1234/ChappSolutionsTest", "sub_path": "buscadorApp/test/test_reserva.py", "file_name": "test_reserva.py", "file_ext": "py", "file_size_in_byte": 2863, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.test.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "models.habitacion.Habitacion.objects.create", "line_number": 10, "usage_type": "call"}, {"api_name": "models.habitacion.Habitacion.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.habitacion.Habitacion", "line_number": 10, "usage_type": "name"}, {"api_name": "models.habitacion.Habitacion.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "models.habitacion.Habitacion.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.habitacion.Habitacion", "line_number": 11, "usage_type": "name"}, {"api_name": "models.reserva.Reserva.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "models.reserva.Reserva.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.reserva.Reserva", "line_number": 13, "usage_type": "name"}, {"api_name": "models.reserva.Reserva.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.reserva.Reserva.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.reserva.Reserva", "line_number": 23, "usage_type": "name"}, {"api_name": "models.habitacion.Habitacion.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "models.habitacion.Habitacion.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.habitacion.Habitacion", "line_number": 31, "usage_type": "name"}, {"api_name": "forms.reserva_form.ReservaFechasForm", "line_number": 38, "usage_type": "call"}, {"api_name": "forms.reserva_form.ReservaFechasForm", "line_number": 41, "usage_type": "call"}, {"api_name": "forms.reserva_form.ReservaFechasForm", "line_number": 44, "usage_type": "call"}, {"api_name": "forms.reserva_form.ReservaHabitacionForm", "line_number": 52, "usage_type": "call"}, {"api_name": "forms.reserva_form.ReservaHabitacionForm", "line_number": 55, "usage_type": "call"}, {"api_name": "forms.reserva_form.ReservaHabitacionForm", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "6303362922", "text": "import os\nimport requests\nimport pymysql\n\ndef connect_to_database():\n config = {\n 'host': 'localhost',\n 'port': 3306,\n 'user': 'root',\n 'password': 'Mvemjsunp9',\n 'database': 'cardscandb'\n }\n\n try:\n conn = pymysql.connect(**config)\n if conn.open:\n print(\"Connected to MySQL database\")\n return conn\n except pymysql.Error as e:\n print(f\"Error connecting to MySQL database: {e}\")\n return None\n\ndef download_image(url, file_path):\n response = requests.get(url, stream=True)\n response.raise_for_status()\n\n with open(file_path, \"wb\") as f:\n for chunk in response.iter_content(chunk_size=8192):\n f.write(chunk)\n\ndef download_card_images(conn, output_folder):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n with conn.cursor() as cursor:\n cursor.execute(\"SELECT id, scryfallId, frameVersion FROM cards\")\n results = cursor.fetchall()\n\n for result in results:\n card_id, scryfall_id, frame_version = result\n if frame_version == \"2015\":\n image_url = 
f\"https://api.scryfall.com/cards/{scryfall_id}?format=image\"\n\n output_file_path = os.path.join(output_folder, f\"{card_id}.jpg\")\n\n try:\n download_image(image_url, output_file_path)\n print(f\"Downloaded image for card id {card_id} to {output_file_path}\")\n except requests.exceptions.RequestException as e:\n print(f\"Failed to download image for card id {card_id}: {e}\")\n\nif __name__ == \"__main__\":\n conn = connect_to_database()\n if conn:\n output_folder = \"card_images\"\n download_card_images(conn, output_folder)\n conn.close()", "repo_name": "CurorVult/CV-Mat-Sort", "sub_path": "massMTGDownloader.py", "file_name": "massMTGDownloader.py", "file_ext": "py", "file_size_in_byte": 1804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymysql.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "pymysql.Error", "line_number": 19, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "requests.exceptions", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "15077274052", "text": "#!/usr/bin/python3\n\nfrom netaddr import IPNetwork, iter_iprange, glob_to_iprange\nimport re\nimport sys\nimport os.path\nimport ipaddress\n\nclass IPParser:\n def __init__(self, filename):\n self.filename = filename\n\n def is_valid_ip_address(self, ip):\n try:\n ipaddress.ip_address(ip)\n return True\n except ValueError:\n return print(\"Not a valid IP address\")\n\n def process(self, ip_value):\n if re.search('-', ip_value):\n self.dash2(ip_value)\n self.withdash(ip_value)\n elif re.search('/', ip_value):\n for ip in IPNetwork(ip_value).iter_hosts():\n print(ip)\n elif re.search(r'^\\s*$', ip_value):\n pass\n elif re.search('\\*+', ip_value):\n for ip in glob_to_iprange(ip_value):\n print(ip)\n elif len(ip_value) == 0:\n pass\n else:\n if self.is_valid_ip_address(ip_value):\n print(ip_value.lstrip().rstrip())\n\n def withdash(self, ip_value):\n try:\n split_ip = ip_value.split('-')\n firstip, secondip = split_ip[0].lstrip().rstrip(), split_ip[1].lstrip().rstrip()\n listfirstip, listsecondip = list(firstip.split('.')), list(secondip.split('.'))\n start, end = int(listfirstip[3]), int(listsecondip[3])\n first_3_octets = listfirstip[0:3]\n first_3 = \".\".join(str(dot) for dot in first_3_octets)\n\n while start <= end:\n print(first_3 + \".\" + str(start))\n start = start + 1\n except IndexError:\n pass\n\n def dash2(self, ip):\n try:\n pull_ip = ip.split('-')\n if len(pull_ip[1]) <= 3:\n firstip, secondip = pull_ip[0].lstrip().rstrip(), pull_ip[1].lstrip().rstrip()\n listfirstip, listsecondip = list(firstip.split('.')), list(secondip.split('.'))\n start, end = int(listfirstip[3]), int(listsecondip[0])\n first_3_octets = listfirstip[0:3]\n first_3 = \".\".join(str(dot) for dot in first_3_octets)\n\n while start <= end:\n print(first_3 + \".\" + str(start))\n start = start + 1\n\n except IndexError:\n pass\n\n def usage(self):\n print(\"\\n\" + \"Example Usage is: ./ipparser.py \\\"nameoffile.txt\\\"\" + \"\\n\")\n\n def main(self):\n try:\n if not os.path.isfile(self.filename):\n print(\"File 
not Found\")\n\n with open(self.filename, 'r') as ip_option:\n newlist = [line.strip() for line in ip_option]\n\n for ip in newlist:\n if re.search(',', ip):\n split_ip = ip.split(',')\n for octet in split_ip:\n self.process(octet)\n elif re.search(';', ip):\n split_ip = ip.split(';')\n for octet in split_ip:\n self.process(octet)\n else:\n self.process(ip)\n\n except:\n print(\"You must supply a file to parse\")\n self.usage()\n\n sys.exit(2)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: ./ipparser.py \")\n else:\n ip_parser = IPParser(sys.argv[1])\n ip_parser.main()\n", "repo_name": "bmethvien/Penetration-Testing-Tools", "sub_path": "ipparser.py", "file_name": "ipparser.py", "file_ext": "py", "file_size_in_byte": 3380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ipaddress.ip_address", "line_number": 15, "usage_type": "call"}, {"api_name": "re.search", "line_number": 21, "usage_type": "call"}, {"api_name": "re.search", "line_number": 24, "usage_type": "call"}, {"api_name": "netaddr.IPNetwork", "line_number": 25, "usage_type": "call"}, {"api_name": "re.search", "line_number": 27, "usage_type": "call"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}, {"api_name": "netaddr.glob_to_iprange", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 75, "usage_type": "name"}, {"api_name": "re.search", "line_number": 82, "usage_type": "call"}, {"api_name": "re.search", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 104, "usage_type": "attribute"}]} +{"seq_id": "24744442661", "text": "import logging\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom pupil_detectors import Detector2D, DetectorBase, Roi\r\nfrom pyglui import ui\r\n\r\nfrom methods import normalize\r\n\r\nfrom pupil_detector_plugins import available_detector_plugins\r\nfrom pupil_detector_plugins.detector_base_plugin import (\r\n PupilDetectorPlugin,\r\n)\r\nfrom pupil_detector_plugins.visualizer_2d import draw_pupil_outline\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass CustomDetector(PupilDetectorPlugin):\r\n uniqueness = \"by_class\"\r\n icon_font = \"pupil_icons\"\r\n icon_chr = chr(0xEC18)\r\n\r\n label = \"Custom Detector\"\r\n\r\n # Use the same identifier as the built-in 2D pupil detector\r\n identifier = \"2d\"\r\n order = 0.9\r\n\r\n @property\r\n def pretty_class_name(self):\r\n return \"Custom Detector\"\r\n\r\n @property\r\n def pupil_detector(self) -> DetectorBase:\r\n return self.__detector_2d\r\n\r\n def __init__(self, g_pool=None):\r\n super().__init__(g_pool=g_pool)\r\n self.__detector_2d = Detector2D({})\r\n self._stop_other_pupil_detectors()\r\n\r\n def _stop_other_pupil_detectors(self):\r\n plugin_list = self.g_pool.plugins\r\n\r\n # Deactivate other PupilDetectorPlugin instances\r\n for plugin in plugin_list:\r\n if isinstance(plugin, PupilDetectorPlugin) and plugin is not self:\r\n plugin.alive = False\r\n\r\n # Force Plugin_List to remove deactivated plugins\r\n plugin_list.clean()\r\n\r\n def detect(self, frame, **kwargs):\r\n\r\n debug_img = frame.bgr if self.g_pool.display_mode == \"algorithm\" else None\r\n\r\n 
frame_data = np.asarray(bytearray(frame.jpeg_buffer), dtype=np.uint8)\r\n frame_bgr = cv2.imdecode(frame_data, cv2.IMREAD_COLOR)\r\n\r\n # Convert to grayscale\r\n frame_gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)\r\n\r\n # Apply a blur to reduce noise\r\n blurred_frame = cv2.GaussianBlur(frame_gray, (7, 7), 0)\r\n\r\n # Use Canny Edge Detection\r\n edges = cv2.Canny(blurred_frame, 100, 200)\r\n\r\n # Find contours in the edge map\r\n contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n result = {'ellipse': None, 'diameter': None, 'location': None, 'confidence': 0, 'id': 0, 'topic': None,\r\n 'method': None, 'timestamp': None, 'norm_pos': None}\r\n\r\n if contours:\r\n for contour in contours:\r\n # Calculate the area of the contour\r\n area = cv2.contourArea(contour)\r\n\r\n # Filter out very small contours based on their area\r\n if area > 100:\r\n # Fit a circle to the contour\r\n (x, y), radius = cv2.minEnclosingCircle(contour)\r\n center = (int(x), int(y))\r\n radius = int(radius)\r\n\r\n # Draw the circle on the image\r\n cv2.circle(frame_bgr, center, radius, (0, 255, 0), 2)\r\n\r\n # Update the result\r\n result['ellipse'] = {'center': (x, y), 'axes': (radius, radius), 'angle': 0}\r\n result['diameter'] = radius * 2 # The diameter of the circle\r\n result['location'] = (x, y) # The center of the circle\r\n result['confidence'] = 1 # Confidence is set to 1 for now\r\n\r\n eye_id = self.g_pool.eye_id\r\n\r\n result[\"id\"] = eye_id\r\n result[\"topic\"] = f\"pupil.{eye_id}.{self.identifier}\"\r\n result[\"method\"] = \"custom-2d\"\r\n result[\"timestamp\"] = frame.timestamp\r\n if result['location'] is not None:\r\n result[\"norm_pos\"] = normalize(result[\"location\"], (frame.width, frame.height), flip_y=True)\r\n\r\n ##with open(r'C:\\Users\\L1303\\Desktop\\pupilSource\\output.txt', 'w') as f:\r\n ## f.write(str(result))\r\n\r\n return result\r\n\r\n def init_ui(self):\r\n super().init_ui()\r\n self.menu.label = self.pretty_class_name\r\n self.menu_icon.label_font = \"pupil_icons\"\r\n info = ui.Info_Text(\"Custom 2D Pupil Detector Plugin\")\r\n self.menu.append(info)\r\n\r\n def gl_display(self):\r\n if self._recent_detection_result:\r\n draw_pupil_outline(self._recent_detection_result, color_rgb=(0.3, 1.0, 0.1))\r\n", "repo_name": "reddote/CustomPupil", "sub_path": "custom_2d.py", "file_name": "custom_2d.py", "file_ext": "py", "file_size_in_byte": 4258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "pupil_detector_plugins.detector_base_plugin.PupilDetectorPlugin", "line_number": 19, "usage_type": "name"}, {"api_name": "pupil_detectors.DetectorBase", "line_number": 35, "usage_type": "name"}, {"api_name": "pupil_detectors.Detector2D", "line_number": 40, "usage_type": "call"}, {"api_name": "pupil_detector_plugins.detector_base_plugin.PupilDetectorPlugin", "line_number": 48, "usage_type": "argument"}, {"api_name": "numpy.asarray", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", 
"line_number": 65, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.minEnclosingCircle", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 89, "usage_type": "call"}, {"api_name": "methods.normalize", "line_number": 104, "usage_type": "call"}, {"api_name": "pyglui.ui.Info_Text", "line_number": 115, "usage_type": "call"}, {"api_name": "pyglui.ui", "line_number": 115, "usage_type": "name"}, {"api_name": "pupil_detector_plugins.visualizer_2d.draw_pupil_outline", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "32350412405", "text": "\"\"\" Module basemodel.py (By: Charley Zhang, July 2020)\nImplements basic functionality for all models subclasses.\n\"\"\"\n\nimport sys, os\nimport torch, torch.nn as nn\nimport torchsummary\n\n\nclass BaseModel(nn.Module):\n r\"\"\" Pytorch basemodel with useful customized functionalities.\"\"\"\n def __init__(self, *args, **kwargs):\n super(BaseModel, self).__init__(*args, **kwargs)\n\n def forward(self, *args):\n raise NotImplementedError(f\"forward() func requires definition.\")\n\n @property\n def device(self):\n return next(self.parameters()).device if self.parameters() else None\n\n @property\n def param_counts(self):\n tot_params = sum(p.numel() for p in self.parameters())\n tot_train_params = sum(p.numel() for p in self.parameters()\n if p.requires_grad\n )\n return tot_params, tot_train_params\n\n @property\n def size(self):\n r\"\"\" Gets total parameter and buffer memory usage in bytes. 
\"\"\"\n params_mem = sum(\n [p.nelement() * p.element_size() for p in self.parameters()]\n )\n bufs_mem = sum(\n [buf.nelement() * buf.element_size() for buf in self.buffers()]\n )\n return params_mem + bufs_mem \n\n def summary(self, input_size=(3,256,256), batch_size=-1, device='cpu'):\n if 'cuda' in device:\n device = 'cuda' # summary does not support targ device assignment\n torchsummary.summary(\n self, \n input_size=input_size, \n batch_size=batch_size,\n device=device\n )\n\n \n ", "repo_name": "charzharr/3D-medseg-pretraining", "sub_path": "src/lib/nets/basemodel.py", "file_name": "basemodel.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torchsummary.summary", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "40931364387", "text": "import csv\nimport numpy as np\nfrom numpy import genfromtxt\nimport json \nimport time\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n# Brownian Motion\n\ndef GeometricBrownianMotion(start_time, end_time, time_increment, start_price, mu, sigma):\n ts = []\n Bs = [0.0]\n # 0 to 100s with 100 points\n ts = np.arange(start_time, end_time, time_increment)\n delta = ts[1] - ts[0] # The period\n \n for t in ts[1::]:\n variance = delta\n n = np.random.normal(0, np.sqrt(variance))\n Bs.append(Bs[-1] + n)\n \n # Geometric Brownian Motion\n St = [start_price]\n for i in range(1,len(ts)):\n Sti = St[0] * np.exp( (mu - (sigma**2.0)/2.0 ) * ts[i] + sigma*Bs[i] )\n St.append(Sti)\n \n return (ts, St)\n \n\n(ts, St) = GeometricBrownianMotion(0.0, 1.0, 0.001, 1.0, 2.0, 1.0)\nplt.plot(ts, St)\nplt.show()\n\n", "repo_name": "stefaj/BTCHASHCOR", "sub_path": "geom.py", "file_name": "geom.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "9690126717", "text": "import json\nimport logging\nimport util\nfrom conf import setting\nfrom flow.data.ffprobe import *\nfrom plot.media_info import MediaInfoPlot\nfrom conf import setting\n\n\nclass MediaInfoFlow:\n def __init__(self, filename, codec_type: str = None, stream_index: int = None, read_intervals: str = None):\n self.filename = filename\n self.codec_type = codec_type\n self.stream_index = stream_index\n self.read_intervals = read_intervals\n self.data = FFprobeData()\n self.data.format = FFprobeFormat(**self._get_format())\n if self.data.format.nb_streams:\n v_index = a_index = 0\n streams = self._get_streams(codec_type, stream_index)\n for stream in streams:\n is_video = bool(\"video\" in stream[\"codec_type\"])\n is_audio = 
bool(\"audio\" in stream[\"codec_type\"])\n if is_video:\n v_stream = FFprobeVideoStream(**stream)\n frames = self._get_frames(v_stream.codec_type, v_index)\n for frame in frames:\n v_frame = FFprobeVideoFrame(**frame)\n v_stream.frames.append(v_frame)\n packets = self._get_packets(v_stream.codec_type, v_index)\n for packet in packets:\n v_packet = FFprobeVideoPacket(**packet)\n v_stream.packets.append(v_packet)\n self.data.video.streams.append(v_stream)\n v_index += 1\n if is_audio:\n a_stream = FFprobeAudioStream(**stream)\n frames = self._get_frames(a_stream.codec_type, a_index)\n for frame in frames:\n a_frame = FFprobeAudioFrame(**frame)\n a_stream.frames.append(a_frame)\n packets = self._get_packets(a_stream.codec_type, a_index)\n for packet in packets:\n a_packet = FFprobeAudioPacket(**packet)\n a_stream.packets.append(a_packet)\n self.data.audio.streams.append(a_stream)\n a_index += 1\n\n def _get_select_stream_option(self, codec_type, stream_index):\n if not codec_type:\n return str()\n codec_flag = None\n if \"video\" in codec_type:\n codec_flag = \"v\"\n elif \"audio\" in codec_type:\n codec_flag = \"a\"\n if not codec_flag or stream_index is None:\n return str()\n return f\"-select_streams {codec_flag}:{stream_index}\"\n\n def _get_read_intervals_option(self):\n return f\"-read_intervals {self.read_intervals}\"\n\n def _get_option(self, codec_type, stream_index, extra_options):\n select_stream_opt = self._get_select_stream_option(codec_type, stream_index)\n read_intervals_opt = self._get_read_intervals_option()\n return f\"{select_stream_opt} {read_intervals_opt} {extra_options} -of json {self.filename}\"\n\n def _get_format(self):\n r = util.XPipe(f\"{setting.bin_ffprobe} -show_format -of json {self.filename}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"format\"]\n\n def _get_streams(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_streams\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return list()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"streams\"]\n\n def _get_frames(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_frames\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"frames\"]\n\n def _get_packets(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_packets\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"packets\"]\n\n\nif __name__ == \"__main__\":\n flow = MediaInfoFlow(\"/opt/ffmpeg/sample/dota2/10-20.flv\", read_intervals=\"%+#30\")\n #logging.info(flow.data.dict())\n plot = MediaInfoPlot(flow.data)\n plot.show(10, 10)\n plot.save(10, 10, f\"{setting.dir_workspace}/plot.png\")\n\n", "repo_name": "imssyang/python3", "sub_path": "app/av-tool/source/flow/media_info.py", "file_name": "media_info.py", "file_ext": "py", "file_size_in_byte": 4561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "util.XPipe", "line_number": 70, "usage_type": "call"}, {"api_name": "conf.setting.bin_ffprobe", "line_number": 70, "usage_type": "attribute"}, {"api_name": "conf.setting", "line_number": 70, "usage_type": "name"}, 
{"api_name": "logging.info", "line_number": 72, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "util.XPipe", "line_number": 79, "usage_type": "call"}, {"api_name": "conf.setting.bin_ffprobe", "line_number": 79, "usage_type": "attribute"}, {"api_name": "conf.setting", "line_number": 79, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 81, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "util.XPipe", "line_number": 88, "usage_type": "call"}, {"api_name": "conf.setting.bin_ffprobe", "line_number": 88, "usage_type": "attribute"}, {"api_name": "conf.setting", "line_number": 88, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 92, "usage_type": "call"}, {"api_name": "util.XPipe", "line_number": 97, "usage_type": "call"}, {"api_name": "conf.setting.bin_ffprobe", "line_number": 97, "usage_type": "attribute"}, {"api_name": "conf.setting", "line_number": 97, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 99, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 101, "usage_type": "call"}, {"api_name": "flow.data.ffprobe", "line_number": 106, "usage_type": "name"}, {"api_name": "plot.media_info", "line_number": 108, "usage_type": "name"}, {"api_name": "plot.media_info.MediaInfoPlot", "line_number": 108, "usage_type": "call"}, {"api_name": "flow.data.ffprobe.data", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flow.data.ffprobe", "line_number": 108, "usage_type": "name"}, {"api_name": "plot.media_info.show", "line_number": 109, "usage_type": "call"}, {"api_name": "plot.media_info", "line_number": 109, "usage_type": "name"}, {"api_name": "plot.media_info.save", "line_number": 110, "usage_type": "call"}, {"api_name": "plot.media_info", "line_number": 110, "usage_type": "name"}, {"api_name": "conf.setting.dir_workspace", "line_number": 110, "usage_type": "attribute"}, {"api_name": "conf.setting", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "72734063235", "text": "from argparse import ArgumentParser\nimport sys\nimport logging\nimport pkg_resources\nimport csv\nfrom quicksect import IntervalTree\nimport networkx as nx\nfrom pathlib import Path\nfrom copy import copy\nimport re\n\n\nEXIT_FILE_IO_ERROR = 1\nEXIT_COMMAND_LINE_ERROR = 2\nEXIT_TSV_FILE_ERROR = 3\nDEFAULT_VERBOSE = False\nDEFAULT_OVERLAP = 0.75 \nPROGRAM_NAME = \"cnvmerge\"\n\n\ntry:\n PROGRAM_VERSION = pkg_resources.require(PROGRAM_NAME)[0].version\nexcept pkg_resources.DistributionNotFound:\n PROGRAM_VERSION = \"undefined_version\"\n\n\ndef exit_with_error(message, exit_status):\n '''Print an error message to stderr, prefixed by the program name and 'ERROR'.\n Then exit program with supplied exit status.\n\n Arguments:\n message: an error message as a string.\n exit_status: a positive integer representing the exit status of the\n program.\n '''\n logging.error(message)\n print(\"{} ERROR: {}, exiting\".format(PROGRAM_NAME, message), file=sys.stderr)\n sys.exit(exit_status)\n\n\ndef parse_args():\n '''Parse command line arguments.\n Returns Options object with command line argument values as attributes.\n Will exit the program on a command line error.\n '''\n description = 'Merge distilled SVs'\n parser = ArgumentParser(description=description)\n parser.add_argument('--version',\n action='version',\n version='%(prog)s ' + PROGRAM_VERSION)\n 
parser.add_argument('--log',\n metavar='LOG_FILE',\n type=str,\n help='record program progress in LOG_FILE')\n parser.add_argument('--overlap',\n metavar='PERCENTAGE',\n default=DEFAULT_OVERLAP,\n type=float,\n help='percentage overlap for CNV equality (default {})'.format(DEFAULT_OVERLAP))\n parser.add_argument('tsv_files',\n nargs='*',\n metavar='TSV_FILE',\n type=str,\n help='Input TSV files')\n return parser.parse_args()\n\n\nclass CNVIntervals(object):\n def __init__(self):\n self.chroms = {}\n\n def add(self, chrom, start, end, val):\n if chrom not in self.chroms:\n self.chroms[chrom] = IntervalTree()\n tree = self.chroms[chrom]\n tree.add(start, end, val)\n\n def lookup(self, chrom, start, end):\n if chrom in self.chroms:\n return self.chroms[chrom].search(start, end)\n else:\n return [] \n\n\n# mapping from unique integer (count) to variant record\nclass Variants(object):\n def __init__(self):\n self.variants = {}\n self.count = 0\n\n def add(self, variant):\n self.variants[self.count] = variant\n self.count += 1\n\ndef get_sample_name(filepath):\n fields = filepath.split('.')\n if len(fields) > 0:\n sample = fields[0]\n else:\n sample = filepath\n return sample\n\ndef read_tsv_files(options):\n sample_ids = set()\n variants = Variants()\n for tsv_filename in options.tsv_files:\n logging.info(\"Processing TSV file from %s...\", tsv_filename)\n sample = get_sample_name(tsv_filename)\n sample_ids.add(sample)\n with open(tsv_filename) as file:\n reader = csv.DictReader(file, delimiter=\"\\t\")\n for row in reader:\n row['sample'] = sample\n variants.add(row)\n logging.info(\"Processing TSV file from %s: done\", tsv_filename)\n return sample_ids, variants\n\n\ndef cnv_intervals(variants):\n logging.info(\"Computing %i CNV intervals\", len(variants))\n intervals = CNVIntervals()\n for idx, (variant_id, variant_info) in enumerate(variants.items()):\n chrom = variant_info['chr']\n start = int(float(variant_info['start']))\n end = int(float(variant_info['end']))\n intervals.add(chrom, start, end, variant_id)\n if (idx + 1) % 100000 == 0:\n logging.info('Computing %i CNV intervals: %i done', len(variants), idx + 1)\n logging.info(\"Computing %i CNV intervals, done\", len(variants))\n return intervals\n\n\ndef is_overlap(start1, end1, start2, end2, min_overlap):\n overlap_start = max(start1, start2)\n overlap_end = min(end1, end2)\n if overlap_start < overlap_end:\n overlap_size = float((overlap_end - overlap_start) + 1)\n cnv1_size = (end1 - start1) + 1\n cnv2_size = (end2 - start2) + 1\n cnv1_overlap = overlap_size / cnv1_size\n cnv2_overlap = overlap_size / cnv2_size\n return cnv1_overlap >= min_overlap and cnv2_overlap >= min_overlap\n return False\n\ndef get_intersections(overlap, variants, intervals):\n logging.info(\"Computing %i CNV intersections...\", len(variants))\n overlaps = nx.Graph() \n for idx, (variant_id, variant_info) in enumerate(variants.items()):\n # make sure all variants are recorded in the graph\n overlaps.add_node(variant_id)\n chrom = variant_info['chr']\n start = int(float(variant_info['start']))\n end = int(float(variant_info['end']))\n this_state = variant_info['state'] \n intersections = { i for i in intervals.lookup(chrom, start, end) }\n for other_variant in intersections:\n other_variant_id = other_variant.data\n other_variant_info = variants[other_variant_id]\n # don't add self edges\n if variant_id != other_variant_id and \\\n is_overlap(start, end, other_variant.start, other_variant.end, overlap) and \\\n this_state == other_variant_info['state']:\n 
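# Worked example (editor's note): with the default 0.75 cutoff, two same-state calls spanning\n # 100-200 and 120-210 share 120-200 (81 bp), i.e. 81/101 and 81/91 of their lengths; both\n # reciprocal overlaps clear 0.75, so the calls are connected and later merge into one component.\n 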
overlaps.add_edge(variant_id, other_variant_id)\n if (idx + 1) % 100000 == 0:\n logging.info(\"Computing %i variant intersections: %i done\", len(variants), idx + 1)\n logging.info(\"Computing %i variant intersections: done\", len(variants))\n return overlaps\n\n\ndef list_median(items):\n mid_pos = len(items) // 2\n return sorted(items)[mid_pos] \n\ndef average(items):\n return (sum(items) / len(items))\n\ndef build_evidence(variants, samples):\n # evidence: mapping, sample -> set(caller)\n num_positive_samples = 0\n evidence = set()\n for var in variants:\n this_sample = var['sample']\n evidence.add(this_sample)\n results = []\n for sample in samples:\n if sample in evidence:\n num_positive_samples += 1\n results.append(1)\n else:\n results.append(0)\n return num_positive_samples, results\n\n\ndef merge_overlaps(sample_ids, variants, overlaps):\n logging.info(\"Merging overlapping variants...\")\n writer = csv.writer(sys.stdout, delimiter=\"\\t\")\n sorted_samples = sorted(sample_ids)\n header = [\"chr\", \"start\", \"end\", \"state\", \"median\", \"num pos samples\"] + sorted_samples \n writer.writerow(header)\n for component in nx.connected_components(overlaps):\n if len(component) > 0:\n variant_infos = [variants[id] for id in component]\n first_info = variant_infos[0]\n chrom = first_info['chr']\n state = first_info['state']\n start = min([int(float(info['start'])) for info in variant_infos])\n end = max([int(float(info['end'])) for info in variant_infos])\n avg_median = average([float(info['median']) for info in variant_infos])\n num_positive_samples, evidence = build_evidence(variant_infos, sorted_samples)\n writer.writerow([chrom, start, end, state, avg_median, num_positive_samples] + evidence) \n logging.info(\"Merging overlapping variants: done\")\n\n\ndef init_logging(log_filename):\n '''If the log_filename is defined, then\n initialise the logging facility, and write log statement\n indicating the program has started, and also write out the\n command line from sys.argv\n\n Arguments:\n log_filename: either None, if logging is not required, or the\n string name of the log file to write to\n Result:\n None\n '''\n if log_filename is None:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s - %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S')\n else:\n logging.basicConfig(filename=log_filename,\n level=logging.DEBUG,\n filemode='w',\n format='%(asctime)s %(levelname)s - %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S')\n logging.info('computation started')\n logging.info('command line: %s', ' '.join(sys.argv))\n\n\ndef main():\n \"Orchestrate the execution of the program\"\n options = parse_args()\n init_logging(options.log)\n sample_ids, variants = read_tsv_files(options)\n intervals = cnv_intervals(variants.variants)\n overlaps = get_intersections(options.overlap, variants.variants, intervals)\n merge_overlaps(sample_ids, variants.variants, overlaps)\n logging.info(\"computation ended\")\n\n\n# If this script is run from the command line then call the main function.\nif __name__ == '__main__':\n main()\n", "repo_name": "bjpop/svdistil", "sub_path": "svdistil/cnvmerge.py", "file_name": "cnvmerge.py", "file_ext": "py", "file_size_in_byte": 9096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pkg_resources.require", "line_number": 22, "usage_type": "call"}, {"api_name": "pkg_resources.DistributionNotFound", "line_number": 23, "usage_type": "attribute"}, {"api_name": 
"logging.error", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 38, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 47, "usage_type": "call"}, {"api_name": "quicksect.IntervalTree", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 107, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 120, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 146, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 147, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 165, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 166, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 195, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 196, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 196, "usage_type": "attribute"}, {"api_name": "networkx.connected_components", "line_number": 200, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 211, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 227, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 227, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 231, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 232, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 236, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 237, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 237, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "38090412289", "text": "from django.urls import path\nfrom . 
import data_receive, data_view, register, command, base, delete, measure\n\n# URLs that direct to the corresponding views and functions.\nurlpatterns = [\n path('', base.index, name = 'base'),\n path('send/', data_receive.index, name = 'send'),\n path('register/', register.index, name = 'register'),\n path('command/', command.index, name = 'command'),\n path('command///', command.command),\n path('data/', data_view.index, name = 'data'),\n path('data//', data_view.client_data),\n path('delete/', delete.index, name = 'delete'),\n path('delete//', delete.delete),\n path('measure//', measure.index, name = 'measure'),\n path('measure/auto//', measure.auto),\n path('measure/auto//status/', measure.auto_status)\n]\n", "repo_name": "Scurvide/rasp-www-host", "sub_path": "Datamana/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "3567124451", "text": "import re\r\nimport os\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.util import ngrams\r\nfrom collections import Counter\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport math\r\n\r\n# Stopwords are collected from txt file\r\nwith open(\"stopwords3.txt\") as f:\r\n stopWords = f.read().splitlines()\r\nstopWords = set(stopWords)\r\n\r\ndef create_data_with_vectorizer(vectorizer, file_name):\r\n '''\r\n return: dataset values and labels for the given split\r\n '''\r\n text_data = [] # text list\r\n labels = [] # label list\r\n path_name = \"./\" + file_name\r\n files = os.listdir(path_name) \r\n for file in files:\r\n with open(os.path.join(path_name, file), 'r', encoding=\"latin1\") as f: # file is opened\r\n text_data.append(f.read().lower()) # text data of the file is collected\r\n review_type = file[file.find(\".\")-1:file.find(\".\")] # review type is extracted from file name\r\n # correct review type is appended to labels list\r\n if review_type == \"N\":\r\n labels.append(-1)\r\n elif review_type == \"Z\":\r\n labels.append(0)\r\n else:\r\n labels.append(1)\r\n # vectorizing operation done\r\n if file_name == \"TRAIN\":\r\n text_data = vectorizer.fit_transform(text_data)\r\n elif file_name == \"VAL\":\r\n text_data = vectorizer.transform(text_data)\r\n\r\n return text_data,labels\r\n\r\nif __name__ == '__main__':\r\n vectorizer = TfidfVectorizer(stop_words=stopWords, lowercase=True, use_idf=True, smooth_idf=True, max_features=4000) # initialized vectorizer \r\n train_data_vectorizer,train_labels_vectorizer = 
create_data_with_vectorizer(vectorizer,\"TRAIN\") # train data is created\r\n # train data vectorizer is pickled\r\n filename = 'train_data.pickle'\r\n outfile = open(filename, 'wb')\r\n pickle.dump(train_data_vectorizer, outfile)\r\n outfile.close()\r\n # train labels list is pickled\r\n filename = 'train_labels.pickle'\r\n outfile = open(filename, 'wb')\r\n pickle.dump(train_labels_vectorizer, outfile)\r\n outfile.close() \r\n # vectorizer pickled\r\n filename = 'vectorizer.pickle'\r\n outfile = open(filename, 'wb')\r\n pickle.dump(vectorizer, outfile)\r\n outfile.close() \r\n\r\n # validation data and labels created\r\n validation_data_vectorizer,validation_labels_vectorizer = create_data_with_vectorizer(vectorizer,\"VAL\")\r\n # validation data is pickled\r\n filename = 'validation_data.pickle'\r\n outfile = open(filename, 'wb')\r\n pickle.dump(validation_data_vectorizer, outfile)\r\n outfile.close()\r\n # validation labels is pickled\r\n filename = 'validation_labels.pickle'\r\n outfile = open(filename, 'wb')\r\n pickle.dump(validation_labels_vectorizer, outfile)\r\n outfile.close() \r\n\r\n", "repo_name": "melihaydogd/sentimental-analysis", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 45, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "16165949394", "text": "import os\nfrom multiprocessing import Process, Value, Array\n\nPROCS = 3\ncount = 0\n\ndef showdata(scalar, vector, label=''):\n ''' show the information in shared objects \n count(global object: not shared within processes, each have their own copy)\n scalar(Value object: shared, acts as a single value)\n vector(Array object: shared, acts as a shared list)\n '''\n msg = \"%-12s pid: %s, global: %s, scalar: %s, vector: %s\"\n print(msg % (label, os.getpid(), count, scalar.value, list(vector)))\n\ndef update(scalar, vector):\n ''' add 1 to the shared objects '''\n global count\n count += 1\n scalar.value += 1\n for i in range(len(vector)): vector[i] += 1\n\nif __name__ == '__main__':\n scalar = Value('i', 0)\n vector = Array('d', PROCS)\n\n showdata(scalar, vector, \"Start value in parent\")\n\n print(\"\\nShowing data in child\")\n # spawn child and show shared memory\n p = Process(target=showdata, args=(scalar, vector))\n p.start(); p.join() # start and join\n\n print(\"\\nUpdate in parent and show in child, serially\")\n # update in parent, show in spawned process\n for i in range(PROCS):\n count += 1\n scalar.value += 1\n vector[i] += 1\n p = Process(target=showdata, args=(scalar, vector))\n p.start(); p.join()\n\n print(\"\\nLoop2: Update in parent and show in child when run parallely\")\n processes = []\n for i in range(PROCS):\n count +=1 \n scalar.value += 1\n vector[i] += 1\n p = Process(target=showdata, args=(scalar, vector))\n 
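# (editor's note) each forked child observes the parent's latest updates because Value/Array\n # live in shared memory, unlike the per-process copy of the plain global 'count'\n 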
processes.append(p)\n p.start()\n for process in processes: process.join()\n \n print(\"\\nLoop3: Update in children serially, show in parent\")\n for i in range(PROCS):\n p = Process(target=update, args=(scalar, vector))\n p.start(); p.join()\n showdata(scalar, vector)\n\n print(\"\\nLoop4: Update in children parallely, show in parent\")\n processes = []\n for i in range(PROCS):\n p = Process(target=update, args=(scalar, vector))\n p.start()\n processes.append(p)\n for process in processes: process.join()\n\n # showing results\n showdata(scalar, vector)\n\n", "repo_name": "ananyo141/ProgrammingPython", "sub_path": "System/IPC/multi3.py", "file_name": "multi3.py", "file_ext": "py", "file_size_in_byte": 2160, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.getpid", "line_number": 14, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 24, "usage_type": "call"}, {"api_name": "multiprocessing.Array", "line_number": 25, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 31, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 40, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 49, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 56, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "8093692488", "text": "import os\nimport json\nimport torch\n\nfrom convinse.library.string_library import StringLibrary as string_lib\nfrom convinse.library.utils import extract_mapping_incomplete_complete\n\ndef input_to_text(history_turns, current_turn, history_separator):\n \"\"\"\n Transform the relevant turns and current turn into the input text.\n \"\"\"\n history_text = history_separator.join(\n [_history_turn_to_text(history_turn, history_separator) for history_turn in history_turns]\n )\n\n # create input\n current_question = current_turn[\"question\"]\n input_text = f\"{history_text}{history_separator}{current_question}\"\n return input_text\n\n\ndef _history_turn_to_text(history_turn, history_separator):\n \"\"\"\n Transform the given history turn to text.\n \"\"\"\n question = history_turn[\"question\"]\n answers = history_turn[\"answers\"]\n answers_text = \" \".join([answer[\"label\"] for answer in answers])\n history_turn_text = f\"{question}{history_separator}{answers_text}\"\n return history_turn_text\n \n\nclass DatasetQuestionRewriting(torch.utils.data.Dataset):\n def __init__(self, config, tokenizer, path):\n self.config = config\n self.tokenizer = tokenizer\n self.history_separator = config[\"history_separator\"]\n\n benchmark_path = config[\"benchmark_path\"]\n train_path = os.path.join(benchmark_path, config[\"train_input_path\"])\n dev_path = os.path.join(benchmark_path, config[\"dev_input_path\"])\n data_paths = [train_path, dev_path]\n self.mapping_incomplete_to_complete = extract_mapping_incomplete_complete(data_paths)\n\n input_encodings, output_encodings, dataset_length = self._load_data(path)\n self.input_encodings = input_encodings\n self.output_encodings = output_encodings\n self.dataset_length = dataset_length\n\n def __getitem__(self, idx):\n item = {key: torch.tensor(val[idx]) for key, val in self.input_encodings.items()}\n labels = self.output_encodings[\"input_ids\"][idx]\n item = {\n \"input_ids\": item[\"input_ids\"],\n \"attention_mask\": item[\"attention_mask\"],\n \"labels\": labels,\n }\n 
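# Aside: in the parallel update loops of the multiprocessing demo above,
# 'scalar.value += 1' is a read-modify-write, so concurrent children can race
# and lose updates. A sketch of the guarded version, using the lock that every
# synchronized multiprocessing.Value already carries:
from multiprocessing import Process, Value

def safe_update(scalar):
    with scalar.get_lock():  # serializes the read-modify-write
        scalar.value += 1

if __name__ == '__main__':
    scalar = Value('i', 0)
    procs = [Process(target=safe_update, args=(scalar,)) for _ in range(3)]
    for p in procs: p.start()
    for p in procs: p.join()
    print(scalar.value)  # reliably 3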
return item\n\n def __len__(self):\n return self.dataset_length\n\n def _load_data(self, path):\n \"\"\"\n Opens the file, and loads the data into\n a format that can be put into the model.\n\n The whole history is given as input.\n The complete question, as annotated in the dataset,\n is the gold output.\n \"\"\"\n # open data\n with open(path, \"r\") as fp:\n dataset = json.load(fp)\n\n inputs = list()\n outputs = list()\n\n for conversation in dataset:\n history = list()\n for turn in conversation[\"questions\"]:\n # skip initial turn: no rewrite required!\n if turn[\"turn\"] == 0:\n continue\n\n # create input\n inputs.append(input_to_text(history, turn, self.history_separator))\n\n # create output\n question = turn[\"question\"]\n complete = self.mapping_incomplete_to_complete.get(question)\n outputs.append(complete)\n\n # append to history\n history.append(turn)\n\n input_encodings = self.tokenizer(\n inputs, padding=True, truncation=True, max_length=self.config[\"qrew_max_input_length\"]\n )\n output_encodings = self.tokenizer(\n outputs, padding=True, truncation=True, max_length=self.config[\"qrew_max_input_length\"]\n )\n dataset_length = len(inputs)\n\n return input_encodings, output_encodings, dataset_length\n", "repo_name": "GracePeterMutiibwa/CONVINSE", "sub_path": "convinse/question_understanding/question_rewriting/dataset_question_rewriting.py", "file_name": "dataset_question_rewriting.py", "file_ext": "py", "file_size_in_byte": 3698, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "convinse.library.utils.extract_mapping_incomplete_complete", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "json.load", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "38951328081", "text": "import sys\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\nimport wikipedia\nimport shutil\nimport time\nfrom datetime import datetime\nfrom haystack import Finder\nfrom haystack.preprocessor.cleaning import clean_wiki_text\nfrom haystack.preprocessor.utils import convert_files_to_dicts\nfrom haystack.preprocessor.preprocessor import PreProcessor\nfrom haystack.file_converter.txt import TextConverter\nfrom haystack.retriever.sparse import ElasticsearchRetriever\nfrom haystack.retriever.sparse import TfidfRetriever\nfrom haystack.reader.farm import FARMReader\nfrom haystack.reader.transformers import TransformersReader\nfrom haystack.utils import print_answers\nimport certifi\nfrom haystack.document_store.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.document_store.memory import InMemoryDocumentStore\nimport json\nimport socket\nimport jsonbin\n\nimport spacy # NOTE pip install -U spacy==2.1.0\n\t\t\t # python -m spacy download en\nimport neuralcoref # pip install neuralcoref\nsys.path.append( '.' 
) \nfrom coref import Coreference\n\nclient = jsonbin.Client('###')\n\n\narguments = sys.argv\nport_to_use = int(arguments[1])\n\n\ndef json_bin(*args):\n\twhile True:\n\t\tif len(args) == 2:\n\t\t\ttry:\n\t\t\t\tclient.store(args[0], args[1])\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\t# jsonbin can fail transiently; retry until the call succeeds\n\t\t\t\tpass\n\t\telif len(args) == 1:\n\t\t\ttry:\n\t\t\t\tx = client.retrieve(args[0])\n\t\t\t\treturn x\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\nroot = os.path.dirname(os.path.abspath(__file__))\n\n# Poll the shared JSON bin until a book load is requested, then fetch its title\n\ndef initiate():\n\twhile True:\n\t\tstatus = json_bin(\"model_status\")\n\t\tif status == \"loading\":\n\t\t\treturn json_bin(\"bookname\")\n\t\t\nbook_title = initiate()\n\ndef top_50_wiki_results_2(book_title):\n\t# Function to fetch relevant documents given a book title\n\n\tif os.path.isdir(root + \"/documents\"):\n\t\tshutil.rmtree(root + '/documents')\n\tos.mkdir(root + '/documents')\n\n\tpage_counter = 1\n\n\ttitles = []\n\ttitles.append(wikipedia.search(book_title, results=5))\n\ttitles.append(wikipedia.search(book_title + ' character', results=5))\n\n\tres = []\n\t[res.append(x) for x in titles if x not in res]\n\ttitles = res[-1]\n\n\ttitle_exclusions = (\"film)\", \"disambiguation)\", \"actor)\", \"actress)\",\n\t\t\"album)\", \"soundtrack)\", \"TV series)\", \"board game)\", \"video game)\",\n\t\t\"episode)\", \"illusionist)\", \"musical)\", \"TV serial)\",\n\t\t\"magician)\", \"comedian)\", \"magic trick)\", \"filmmaker)\", \"illusion)\",\n\t\t\"manga)\", \"play)\", \"song)\", \"opera)\", \"film series)\", \"miniseries)\")\n\n\tfirst_sent_exclusions = (\"actor\", \"actress\")\n\n\tfor title in titles:\n\t\tif not any(x in title for x in title_exclusions):\n\t\t\ttry:\n\t\t\t\tpage = wikipedia.page(title, auto_suggest=False)\n\t\t\t\tfirst_sentence = page.summary.split('.')[0]\n\t\t\t\tif not any(x in first_sentence for x in first_sent_exclusions):\n\t\t\t\t\tcontent = page.content\n\t\t\t\t\tpath = os.path.join(root + '/documents', str(page_counter)+'.txt')\n\t\t\t\t\tf = open(path, 'w', encoding='utf-8')\n\t\t\t\t\tf.write(content)\n\t\t\t\t\tf.close()\n\t\t\t\t\tprint('Created document number ' + str(page_counter)\n\t\t\t\t\t\t+ ' from page ' + title)\n\t\t\t\t\tpage_counter += 1\n\n\t\t\texcept:\n\t\t\t\t# skip pages that fail to load (e.g. wikipedia.DisambiguationError)\n\t\t\t\tpass\n\n\treturn page_counter\n\nif __name__ == \"__main__\":\n\treader_name = \"deepset/roberta-base-squad2\"\n\ttop_k_retriever = 7\n\ttop_k_reader = 1\n\tconversational = 'True'\n\n\t# Use the FARM reader (RoBERTa fine-tuned on SQuAD2)\n\treader = FARMReader(model_name_or_path=reader_name,\n\t\tuse_gpu=True)\n\n\tprint('Fetching documents for book ' + book_title)\n\tdocument_fetcher_func = top_50_wiki_results_2\n\tnum_docs = document_fetcher_func(book_title)\n\n\tprint('Fetched ' + str(num_docs) + ' documents for book ' + book_title)\n\n\tdocument_store = ElasticsearchDocumentStore(host=\"localhost\", username=\"\", password=\"\", index=\"default\")\n\tdocument_store.delete_all_documents(index=\"default\")\n\t#document_store = InMemoryDocumentStore()\n\n\tdoc_dir = root + \"/documents\"\n\tdicts = convert_files_to_dicts(dir_path=doc_dir,\n\t\tclean_func=clean_wiki_text, split_paragraphs=True)\n\n\t# Add documents to the document store\n\tdocument_store.write_documents(dicts)\n\n\t# Use ElasticsearchRetriever\n\tretriever = ElasticsearchRetriever(document_store=document_store)\n\t#retriever = TfidfRetriever(document_store=document_store)\n\n\tfinder = Finder(reader, retriever)\n\ttop_k_reader = 1\n\n\tif conversational == \"True\":\n\t\tcoref_model = 
Coreference()\n\t\n\tjson_bin(\"model_status\", \"online\")\n\n\twhile True:\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM,)\n\t\ts.bind((\"localhost\", int(port_to_use)))\n\t\ts.listen(1)\n\t\tconn, addr = s.accept()\n\t\tdata = conn.recv(1024)\n\t\tconn.close()\n\t\tquestion = data.decode()\n\t\tif question == \"EXIT\":\n\t\t\tjson_bin(\"model_status\", \"offline\")\n\t\t\tsys.exit()\n\t\tprint(\"Initial question \", question)\n\n\t\tif conversational == \"True\":\n\t\t\tquestion = coref_model.resolve_question(question)\n\n\t\tprint(\"Input question \", question)\n\n\t\tbegin = time.time()\n\t\tprediction = finder.get_answers(question=question,\n\t\t\ttop_k_retriever=top_k_retriever,\n\t\t\ttop_k_reader=top_k_reader)\n\n\t\tj = prediction\n\n\t\ttry:\n\t\t\tif j['answers'][0]['answer']:\n\t\t\t\tanswer = j['answers'][0]['answer']\n\t\t\t\tprobability = j['answers'][0]['probability']\n\t\t\t\tscore = j['answers'][0]['score']\n\t\t\telse:\n\t\t\t\tanswer=\"I don't know the answer unfortunately\"\n\t\t\t\tprobability=0\n\t\t\t\tscore=-1\n\t\texcept:\n\t\t\tanswer=\"I don't know the answer unfortunately\"\n\t\t\tprobability=0\n\t\t\tscore=-1\n\t\t\n\t\tend = time.time()\n\t\t\n\t\tjson_bin(\"duration\", end)\n\t\tjson_bin(\"NLP_Confidence\", score)\n\n\n\t\tprint(\"Answer: \" + answer)\n\t\tprint(\"Score: \" + str(score))\n\t\tprint(\"Time taken: \" + str(end-begin))\n\n\t\t# Transmit Answer\n\t\tx = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tx.connect((\"localhost\", int(port_to_use - 1)))\n\t\tx.sendall(answer.encode())\n\t\tx.close()\n\n\n\t# Clean up document store\n\tdocument_store.delete_all_documents(doc_id)\n\tes.indices.delete(index=doc_id)\n\tif os.path.isdir(root + \"/documents\"):\n\t\tshutil.rmtree(root + '/documents')\n\n\n", "repo_name": "readers-companion/Final_Project", "sub_path": "Readers Companion - Backend/NLP/system.py", "file_name": "system.py", "file_ext": "py", "file_size_in_byte": 5725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "jsonbin.Client", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 71, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 72, "usage_type": "call"}, {"api_name": "wikipedia.search", "line_number": 77, "usage_type": "call"}, {"api_name": "wikipedia.search", "line_number": 78, "usage_type": "call"}, {"api_name": "wikipedia.page", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "haystack.reader.farm.FARMReader", "line_number": 119, "usage_type": "call"}, {"api_name": "haystack.document_store.elasticsearch.ElasticsearchDocumentStore", "line_number": 128, "usage_type": "call"}, {"api_name": "haystack.preprocessor.utils.convert_files_to_dicts", "line_number": 133, 
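# Aside: a sketch of the peer that the question-answering loop above expects.
# It sends the question to the model's port and then waits on port - 1 for the
# answer; the port number 5001 is an arbitrary example, not from the repo:
import socket

def ask(question, port=5001):
    # Listen for the answer first, so the model cannot connect back before we
    # are ready to accept.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(("localhost", port - 1))
    listener.listen(1)

    sender = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sender.connect(("localhost", port))
    sender.sendall(question.encode())
    sender.close()

    conn, _ = listener.accept()
    answer = conn.recv(4096).decode()
    conn.close()
    listener.close()
    return answer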
"usage_type": "call"}, {"api_name": "haystack.preprocessor.cleaning.clean_wiki_text", "line_number": 134, "usage_type": "name"}, {"api_name": "haystack.retriever.sparse.ElasticsearchRetriever", "line_number": 140, "usage_type": "call"}, {"api_name": "haystack.Finder", "line_number": 143, "usage_type": "call"}, {"api_name": "coref.Coreference", "line_number": 147, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 152, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 152, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 161, "usage_type": "call"}, {"api_name": "time.time", "line_number": 169, "usage_type": "call"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 201, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 201, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 201, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "9331802514", "text": "# Behavioural cloning conditioned on state-conditioned language\n\n\n\n\nfrom argparse import ArgumentParser\nimport pickle\nimport time\nimport os\nimport gym\nimport minerl\nimport torch as th\nimport numpy as np\nfrom lib.data_parallel import BalancedDataParallel\nfrom matplotlib.pyplot import figure\nfrom matplotlib import pyplot as plt\nfigure(figsize=(20, 20), dpi=80)\n\nfrom agent import PI_HEAD_KWARGS, MineRLAgent\nfrom IDM_data_loader_np256 import DataLoader\nfrom lib.tree_util import tree_map\nfrom cosine_annealing_warmup import CosineAnnealingWarmupRestarts\nimport numpy as np \n\n\n\n\n\n\n\n\n\n# NOTE: modify with the desired language model and gated-cross-attention weights\nLM_WEIGHTS_FILE = 'TRAINING/LM_ONLY/_FINAL/VLPT_LM_500__LM.weights'\nVPT_LM_XATTN_WEIGHTS_FILE = 'TRAINING/LM_ONLY/_FINAL/VLPT_LM_500__Xattn_VPT_LM.weights'\n\n\n\n\n\n\n\n\n\n\n# ------------------ MODEL HYPERPARAMETERS\nLM_TIMEOUT_RATE = 1 # results in about 7% silence tokens at NeCubS WPM -- wrong calcualteions, is 20% silence\nF_SEQ_LEN = 96\nL_SEQ_LEN = F_SEQ_LEN//LM_TIMEOUT_RATE\nLM_type = \"transfo-xl-wt103\"\nXATNN_MEMLEN = 128\n\nVPT_MODEL_FILE = 'foundation-model-1x.model' #'VLPT/2x.model'\nVPT_WEIGHTS_FILE = 'foundation-model-1x.weights' # 'VLPT/bc-early-game-2x.weights' # 'VLPT/rl-from-early-game-2x.weights' \nVPT_WIDTH = 1024\nDTYPE = th.bfloat16\n\nTRAINING_LOG_FILE = 'TRAINING/VPT_ONLY/training_log'\nOUTPUT_WEIGHTS = 'TRAINING/VPT_ONLY/VLPT.weights'\n# VPT model automatically downloads transfo_xl weights from HuggingFace and uses those for LM. 
If weights include the LM it should be overwritten though?\n\n\n\n\n\n# -------------------- TRAINING HYPERPARAMETERS\nVPT_LEARNING_RATE = 0.00002 # VPT paper did 0.000181 for finetuning: [we are training to a very different task], [VPT uses linear learning rate decay], [] # to keep the LM intact I dont \nwarmup_steps = 400 # warmup should be very short since the transformers are pretrained # PaLI uses 1k warmup steps, obviously don't want to do more\nBATCH_SIZE = 16\nEPOCHS = 5\nN_WORKERS = 31 # Needs to be <= number of videos # Ideally more than batch size to create variation in datasets (otherwise, you will get a bunch of consecutive samples)\n\nVPT_WEIGHT_DECAY = 0.039428 # VPT weight decay. transfoxl weight decay is \nVPT_MAX_GRAD_NORM = 1.0 # VPT says 5.0, transfoXL says 0.25. We will basically c\n\nEVAL_BATCH_SIZE=4\nnum_videos = 172\n\nDATASET = 'DATASET/'\n\nTRAINING_PROGRESS = 'TRAINING/VPT_ONLY/training_progress'\nmax_train_steps = (EPOCHS*num_videos*20*60*30)/(F_SEQ_LEN*BATCH_SIZE) # num steps = number of frames / number of frames per batch # 3*10 mins per video = 600000 ms -> 4687 chunks of 128 frames. want 1000 hours video = 60,000 minutes = 6,000 videos of 10 minutes each\nLOSS_REPORT_RATE = 10\nEVALUATION_RATE = 100\n# higher than its peak learning rate. finetuning a multimodal LM with the same peak lr seems ok according to PaLI, Flamingo, but they also train on other tasks; maybe just keep some minecraft data for language training?\n\n\n\n\n\n\n# ------------------------------------- USEFUL UTILITIES\ndef load_model_parameters(path_to_model_file):\n agent_parameters = pickle.load(open(path_to_model_file, \"rb\"))\n policy_kwargs = agent_parameters[\"model\"][\"args\"][\"net\"][\"args\"]\n pi_head_kwargs = agent_parameters[\"model\"][\"args\"][\"pi_head_opts\"]\n pi_head_kwargs[\"temperature\"] = float(pi_head_kwargs[\"temperature\"])\n return policy_kwargs, pi_head_kwargs\n\ndef save_hidden_states_VPT(video_ids, hidden_state, saved_hidden_states):\n # Unpack the hidden states\n # Iterate over the batch dimension\n for b in range(BATCH_SIZE):\n video_id = video_ids[b]\n video_hidden_state = []\n for l in range(4):\n (hidden_state_1, (hidden_state_2a, hidden_state_2b)) = hidden_state[l]\n #hidden_state_1[:]=False\n #video_id=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERENT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHICH IS NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset
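# Aside: the save/load helpers in this file all follow one pattern -- a dict
# keyed by video id caches each stream's recurrent state between batches, a
# fresh initial state is minted for unseen ids, and states are detached before
# storage. A minimal sketch of that pattern; init_state and step are
# placeholders, not functions from this file:
import torch as th

state_cache = {}

def init_state():
    return th.zeros(1, 8)  # placeholder shape

def step_with_cache(video_id, step):
    # Fetch the cached state, or a fresh one for a video not seen before.
    state = state_cache.get(video_id)
    if state is None:
        state = init_state()
    new_state = step(state)
    # Detach so the cache never keeps old computation graphs alive.
    state_cache[video_id] = new_state.detach()
    return new_state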
\n \n # Get the hidden state for this video\n video_hidden_state_layer = (hidden_state_1[b].clone(), (hidden_state_2a[b].clone(), hidden_state_2b[b].clone()))\n video_hidden_state.append(video_hidden_state_layer)\n # Save the hidden state for this video\n saved_hidden_states[video_id] = video_hidden_state\n \n return saved_hidden_states\n\ndef load_hidden_states_VPT(video_ids, saved_hidden_states):\n assert isinstance(video_ids, list)\n assert(len(video_ids)==BATCH_SIZE)\n B = BATCH_SIZE\n T = 128\n E = VPT_WIDTH\n \n # Initialize the hidden states\n hidden_state_1 = [th.zeros([B, 1, T], dtype=th.bool).to(DEVICE)]*4\n hidden_state_2a = [th.zeros([B, T, E], dtype=DTYPE).to(DEVICE)]*4\n hidden_state_2b = [th.zeros([B, T, E], dtype=DTYPE).to(DEVICE)]*4\n \n # Iterate over the batch dimension\n for b in range(B):\n video_id = video_ids[b]\n #video_id=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERENT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHICH IS NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n \n # Check if a hidden state has been saved for this video\n if video_id in saved_hidden_states:\n\n for l in range(4): # repeat for each layer in VPT\n # Get the saved hidden state for this video\n (video_hidden_state_1, (video_hidden_state_2a, video_hidden_state_2b)) = saved_hidden_states[video_id][l]\n \n # Set the hidden state for this video\n hidden_state_1[l][b] = video_hidden_state_1\n hidden_state_2a[l][b] = video_hidden_state_2a\n hidden_state_2b[l][b] = video_hidden_state_2b\n else:\n print(\"VPT NEW VIDEO SEEN: ADD FRESH INIT. STATE\", video_id)\n\n for l in range(4): # repeat for each layer in VPT\n # Get a new initial hidden state for this video\n \n \n _, (video_hidden_state_2a, video_hidden_state_2b) = policy.initial_state(1)[l]\n \n # Set the initial hidden state for this video\n #print(video_hidden_state_1.shape)\n is_first_frame_true = th.zeros((1, 128), dtype=th.bool).to(DEVICE)\n is_first_frame_true[:,0]=True\n hidden_state_1[l][b] = is_first_frame_true\n hidden_state_2a[l][b] = video_hidden_state_2a\n hidden_state_2b[l][b] = video_hidden_state_2b\n\n # assemble the per-layer state tuples\n hidden_state = []\n for i in range(4):\n hidden_state_layer = hidden_state_1[i], (hidden_state_2a[i], hidden_state_2b[i])\n hidden_state.append(hidden_state_layer)\n\n return hidden_state\n\ndef load_hidden_states_LM(video_ids, saved_hidden_states):\n assert isinstance(video_ids, list)\n try:\n T = policy.net.LM.transformer.mem_len\n n_layers = policy.net.LM.transformer.n_layer\n except: \n return None\n B = BATCH_SIZE\n E = 1024\n\n out_hidden_state = []\n for i in range(n_layers):\n out_hidden_state.append(th.zeros([T,B,E], dtype=DTYPE).to(DEVICE))\n\n for b, video in enumerate(video_ids):\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERENT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHICH IS NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n\n if video in saved_hidden_states:\n hidden_state = saved_hidden_states[video]\n else:\n hidden_state = policy.net.LM.transformer.init_mems(1)\n #print(\"LM NEW VIDEO SEEN: ADD FRESH INIT. 
STATE\", video)\n\n for l in range(n_layers):\n #print('\\n',out_hidden_state[l].shape)\n #print(hidden_state[l].shape)\n out_hidden_state[l][:T,b,:E] = hidden_state[l].clone().squeeze(1)\n\n\n return out_hidden_state\n\ndef save_hidden_states_LM(video_ids, hidden_state, saved_hidden_states):\n try:\n T = policy.net.LM.transformer.mem_len\n n_layers = policy.net.LM.transformer.n_layer\n except:\n return None\n B = BATCH_SIZE\n E = 1024\n\n for b, video in enumerate(video_ids): #frames:\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n out_hidden_state = []\n for layer in hidden_state: # rewrite with the new one\n layer_sample = layer[:T,b,:E].clone().unsqueeze(1)\n out_hidden_state.append(layer_sample) # MAKE SURE WE CLONE - we dont want to mutate states that are are in use \n\n saved_hidden_states[video] = out_hidden_state\n\ndef load_hidden_states_Xattn(video_ids, saved_hidden_states, SEQ_LEN, E):\n #assert hidden_state.shape == [BATCH_SIZE,F_SEQ_LEN or L_SEQ_LEN, 2048 or 1024]\n \n XATTN_MEMLEN=XATNN_MEMLEN\n T = XATTN_MEMLEN + SEQ_LEN\n B = BATCH_SIZE\n\n out_hidden_state = th.zeros([B,T,E], dtype=DTYPE).to(DEVICE) # keys may have different lengths, so we pad with -10 and mask them in VLPT forward\n\n for b, video in enumerate(video_ids):\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n\n if video in saved_hidden_states:\n hidden_state = saved_hidden_states[video]\n else:\n hidden_state = th.zeros([1,T,E], dtype=DTYPE).to(DEVICE) # no past keys\n \n #print(\"LM NEW VIDEO SEEN: ADD FRESH INIT. STATE\", video)\n\n out_hidden_state[b] = hidden_state.clone().squeeze(0)\n\n return out_hidden_state\n\ndef save_hidden_states_Xattn(video_ids, hidden_state, saved_hidden_states):\n #assert hidden_state.shape == [BATCH_SIZE,F_SEQ_LEN or L_SEQ_LEN, 2048 or 1024]\n \n for b, video in enumerate(video_ids): #frames:\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n out_hidden_state = hidden_state[b].clone().unsqueeze(0)\n\n saved_hidden_states[video] = out_hidden_state\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef VPT_train():\n \n global eval_data_loader, policy, agent, vanvpt_agent, DEVICE, s1, s2 # for eval function\n if th.cuda.is_available():\n DEVICE = \"cuda\"\n else:\n DEVICE = \"cpu\"\n\n #try:\n s1 = th.cuda.Stream()\n s2 = th.cuda.Stream() # for multithreading. 
When we need to calculate forward pass for training model and original VPT model for KL-divergence, we can do both concurrently.\n #except:\n # pass\n\n\n ### ---------------------------- initialise dataset and training\n print('BC: starting data loaders')\n ## Data Loader init\n train_data_loader = DataLoader(\n dataset_dir=DATASET+'train/',\n n_workers=N_WORKERS,\n batch_size=BATCH_SIZE,\n F_SEQ_LEN=L_SEQ_LEN*LM_TIMEOUT_RATE,\n LM_TIMEOUT_RATE=LM_TIMEOUT_RATE,\n LM_SILENCE_TOKEN=2,\n n_epochs=EPOCHS,\n start_time='rand')\n\n\n\n ### ---------------------- initialise BLC agent\n print('BC: LOADING VLPT')\n ### VPT INIT\n agent_policy_kwargs, agent_pi_head_kwargs = load_model_parameters(VPT_MODEL_FILE)\n # To create model with the right environment.\n # All basalt environments have the same settings, so any of them works here\n agent = MineRLAgent(device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs, LM_type=LM_type, LM_TIMEOUT_RATE=LM_TIMEOUT_RATE, L_SEQ_LEN=L_SEQ_LEN, dtype=DTYPE)\n #agent = MineRLAgent(device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs, LM_type=None, LM_TIMEOUT_RATE=LM_TIMEOUT_RATE, L_SEQ_LEN=L_SEQ_LEN, dtype=DTYPE)\n \n agent.load_weights(VPT_WEIGHTS_FILE) #@ DEBUG EVALUATE\n agent.load_weights(VPT_LM_XATTN_WEIGHTS_FILE) # load Xattn from finetuned LM-only frame-conditioned training\n agent.load_weights(LM_WEIGHTS_FILE)# load LM from finetuned LM-only frame-conditioned training\n #agent.load_weights('TRAINING/VPT_ONLY/final_7/VLPT_2500_.weights')\n\n policy = agent.policy # = th.compile(agent.policy)\n\n ## enable dropout for apporopriate layers\n policy.net.eval() \n policy.net.VPT0_VPT1_dropout.train() # We are only training Xattn2, VPT2-VPT4 and the final layers. so these layers should have dropout applied. These get input from vpt1 and LM. this means we need dropout to apply dropout between VPT1 and VPT 2 (im mixing indices starting at 0 and 1, you know what I mean, neither is very clear :P).\n policy.net.recurrent_layer.blocks[0].train() # for proper dropout ebtween VPT1 and VPT2 we should do this, as this applies the residual after dropout. HOWEVER, this would also apply dropout to the LM input and that an unecessary slowdown of training since LM is not being trained and we apply dropout to the LM input to whats being trained anyway. we COULD reorganise the output VPT1 so that it outputs the residual and the last FFW output separately so we can manually apply dropout and reconnect the residual where needed but this is complicated and porbably not that necessary. Ive spent enough time optimising performance. applying dropout to the residual is probably not that bad right? I at least didnt notice the difference with undoing this with theLM-only training, where its possible to do this possible but i wasnt doing it before.\n for i in [1,2,3]:\n policy.net.recurrent_layer.blocks[i].train() # add dropout to all VPT layers beign trained (2-4)\n policy.net.lastlayer.train()\n policy.net.final_ln.train() # these last two dont even have dropout but i believe in magic\n policy.net.Xattn_LM_VPT.train()\n policy.net.LM.transformer.layers[-1].pos_ff.CoreNet[-1].train() # activate final dropout layer in LM so that residual is applied. This aplpies dropout to LM input of Xattn\n \n\n\n # -- freeze untrained layers\n # we dont train VPT1. LM was trained on this, training it further will likely improve BC performance at cost of LM performance. 
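# Aside: the dropout wiring above relies on PyTorch's per-module train/eval
# switch -- eval() the whole network, then flip train() back on only for the
# sub-modules being optimized, so dropout is active exactly where gradients
# flow. A self-contained sketch with generic module names:
import torch.nn as nn

net = nn.Sequential(
    nn.Linear(8, 8), nn.Dropout(0.1),  # frozen half
    nn.Linear(8, 8), nn.Dropout(0.1),  # trained half
)
net.eval()      # disables dropout everywhere
net[2].train()  # re-enable training-mode behaviour on the trained layers only
net[3].train()  # this Dropout now drops activations again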
\n VPT_trainable_params = set()\n VPT_trainable_params.update(set(policy.net.recurrent_layer.blocks[1:4].parameters()))\n VPT_trainable_params.update(set(policy.net.lastlayer.parameters()))\n VPT_trainable_params.update(set(policy.net.final_ln.parameters()))\n VPT_trainable_params.update(set(policy.net.Xattn_LM_VPT.parameters()))\n VPT_trainable_params = list(VPT_trainable_params)\n \n ## freeze layers not being trained -dont updates its params - remove grads # ACTUALLY NEVERMIND - we still want gradients to from from ... actually NVM. I was going to say need grads from T1 so T1 can be learnt, but LM is pretrained to dpeend on t1, and changing it might hurt language, so we leave it frozen and grad-less; gradient-free\n frozen_params = set()\n frozen_params = set(policy.net.parameters()) - set(VPT_trainable_params) # removes LM, first transformer layer\n for param in (list(frozen_params)):\n param.requires_grad=False\n\n\n ### LOAD VANILLA_VPT FOR KL DIVERGENCE CHECKS\n print('BLC: LOADING VAN_VPT')\n vanvpt_agent_policy_kwargs, vanvpt_agent_pi_head_kwargs = load_model_parameters(VPT_MODEL_FILE)\n # To create model with the right environment.\n # All basalt environments have the same settings, so any of them works here\n vanvpt_agent = MineRLAgent(device=DEVICE, policy_kwargs=vanvpt_agent_policy_kwargs, pi_head_kwargs=vanvpt_agent_pi_head_kwargs, LM_type=None, dtype=DTYPE)\n vanvpt_agent.load_weights(VPT_WEIGHTS_FILE)\n \n #vanvpt_agent.policy.net = th.compile(vanvpt_agent.policy.net)\n vanvpt_agent.policy.eval()\n\n\n\n\n # --- DEFINE OPTIMIZER # dont optimize CNN section.\n print('BC: OPTIMISER')\n\n optimizer = th.optim.AdamW(params=VPT_trainable_params, lr=VPT_LEARNING_RATE, weight_decay=VPT_WEIGHT_DECAY)\n lr_schedule = CosineAnnealingWarmupRestarts(optimizer,\n first_cycle_steps=max_train_steps,\n cycle_mult=1.0,\n max_lr=VPT_LEARNING_RATE, #@ WARNING: this sets both VPT and LM learnig rates to the same. For now this is okay because they are the same anyway, but this will need modifying if different learning rates are used in the end\n min_lr=0,\n warmup_steps=warmup_steps,\n gamma=1.0)\n\n\n\n\n \n # --------------------------- start training loop\n print('BC: MAIN LOOP:')\n saved_hidden_states_VPT = {} # this is so that, despite workers>batch size and therefore video not being streamed in perfect order across batches, we can keep track of VPT_hidden_stae and LM_hidden state\n saved_hidden_states_LM = {} # same^\n saved_hidden_states_Xattn1 = {}\n saved_hidden_states_Xattn2 = {}\n saved_hidden_states_vanVPT = {} # same^\n\n lowest_val_loss = [float('inf')]*4\n is_first_frame = th.zeros((BATCH_SIZE, F_SEQ_LEN), dtype=th.bool).to(DEVICE)\n current_video_group_id = [0]*BATCH_SIZE\n start_time = time.time()\n loss_sum=np.zeros([0,2])\n val_loss_sum=np.zeros([0,2]) # only have BC loss and KL-divergence to track\n gates=np.zeros([0,2])\n # get multiple steams of 10 minutes* video across multiple batches. 
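# Aside: a sketch of how the warmup-plus-cosine schedule configured above
# behaves, assuming the same third-party cosine_annealing_warmup package that
# this file imports; the step counts here are toy values:
import torch as th
from cosine_annealing_warmup import CosineAnnealingWarmupRestarts

params = [th.nn.Parameter(th.zeros(1))]
opt = th.optim.AdamW(params, lr=2e-5)
sched = CosineAnnealingWarmupRestarts(
    opt, first_cycle_steps=1000, max_lr=2e-5, min_lr=0.0, warmup_steps=100)
for step in range(5):
    opt.step()
    sched.step()
    print(step, opt.param_groups[0]['lr'])  # lr climbs linearly during warmup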
continue until (to ensure lanauge model sees far back langauge)\n for batch_i, (video_group_id, subseq_ids, batch_frames, batch_words, batch_actions, finished_videos) in enumerate(train_data_loader):\n\n\n\n\n # -------------------------------------------- EVALUATION------------------------------------------\n if batch_i%EVALUATION_RATE == 0:\n print(\"## ---------------------------------- - EVAL - ---------------------------------------\", batch_i)\n eval_data_loader = DataLoader(\n dataset_dir=DATASET+'valid/',\n n_workers=EVAL_BATCH_SIZE,\n batch_size=EVAL_BATCH_SIZE, \n F_SEQ_LEN=F_SEQ_LEN,\n n_epochs=1,\n LM_TIMEOUT_RATE=LM_TIMEOUT_RATE,\n max_subseqs_per_traj=30,\n start_time=120)\n VPT_eval_loss, noised_VPT_eval_loss = VPT_evaluate()\n del eval_data_loader\n \n \n val_loss = np.asarray([[VPT_eval_loss, noised_VPT_eval_loss]])\n val_loss_sum = np.concatenate([val_loss_sum, val_loss])\n \n # --- plot val_loss\n plt.plot(val_loss_sum[:,0], color='blue')\n try:\n os.remove('TRAINING/VPT_ONLY/val_loss_graph_.png')\n except: \n pass\n plt.savefig('TRAINING/VPT_ONLY/val_loss_graph_.png')\n plt.clf()\n \n \n # --- plot performance difference between matched words/frames and unmatched\n plt.plot(val_loss_sum[:,1]-val_loss_sum[:,0], color='black') # if NN learning to use words properly, then noised_loss>val_loss, so graph goes up\n try:\n os.remove('TRAINING/VPT_ONLY/noised_diff_graph_.png')\n except: \n pass\n plt.savefig('TRAINING/VPT_ONLY/noised_diff_graph_.png')\n plt.clf()\n\n\n\n\n line=str(\"Eval: VPT_loss: {0}, noised_VPT_loss: {1}\\nXattnGate:1{2}\".format(\n str(VPT_eval_loss), \n str(noised_VPT_eval_loss), \n str(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item())+','+str(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())))\n with open('TRAINING/VPT_ONLY/training_log_val','a') as file:\n file.write(line)\n \n\n # save a model if ALL losses are lower\n save_model=True\n for loss_lowest, loss_new in zip(lowest_val_loss, [VPT_eval_loss, noised_VPT_eval_loss]):\n if loss_lowest < loss_new:\n save_model = False \n if save_model:\n print(\"#----------------------- BEST VAL LOSS! 
SAVING!\")\n # SAVE MODEL WEIGHTS\n lowest_val_loss = [VPT_eval_loss, noised_VPT_eval_loss]\n output_path = '/'.join(OUTPUT_WEIGHTS.split('/')[0:-1])+'/'\n output_bonus = '_'+str(batch_i)+'_'\n output_name = '.'.join(OUTPUT_WEIGHTS.split('/')[-1].split('.')[0:-1])+output_bonus\n th.save(policy.state_dict(), output_path+output_name+'.weights')\n \n # ALSO SAVE OPTIMIZER AND LEARNING_RATE_SCHEDULER STATES\n th.save(optimizer.state_dict(), output_path+output_bonus+'.optim')\n th.save(lr_schedule.state_dict(), output_path+output_bonus+'.lrschedule')\n print('## ---------------------------------- - TRAIN - ---------------------------------------\"')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # ---------------------------------------------------------- TRAINING BATCH --------------------------------------------\n \n\n\n\n ### ------------ FORMAT INPUT \n # format words\n x_words, y_words = batch_words['input_ids'], batch_words['labels']\n x_words=th.from_numpy(x_words).to(DEVICE)\n y_words=th.from_numpy(y_words).to(DEVICE)\n # format input frames\n batch_frames['img'] = th.from_numpy(batch_frames['img']).to(DTYPE).to(DEVICE) #\n\n # format action labels\n batch_actions['camera'] = batch_actions['camera'].reshape([-1,2])\n batch_actions['buttons'] = batch_actions['buttons'].reshape([-1,20])\n\n action_labels = agent._IDM_action_to_env(batch_actions)\n #print('\\n\\n',action_labels)\n action_labels = agent._env_action_to_agent(action_labels, to_torch=True, check_if_null=False)\n\n action_labels['camera'] = action_labels['camera'].reshape([BATCH_SIZE,F_SEQ_LEN])\n action_labels['buttons'] = action_labels['buttons'].reshape([BATCH_SIZE,F_SEQ_LEN])\n #1/0\n\n\n\n\n\n ## ---------- LOAD MEMS FOR VIDEOS\n VPT_state = load_hidden_states_VPT(video_group_id, saved_hidden_states_VPT)\n vanVPT_state = load_hidden_states_VPT(video_group_id, saved_hidden_states_vanVPT)\n LM_state = load_hidden_states_LM(video_group_id, saved_hidden_states_LM)\n Xattn1_state = load_hidden_states_Xattn(video_group_id, saved_hidden_states_Xattn1, SEQ_LEN=F_SEQ_LEN, E=VPT_WIDTH)\n Xattn2_state = load_hidden_states_Xattn(video_group_id, saved_hidden_states_Xattn2, SEQ_LEN=L_SEQ_LEN, E=1024)\n\n\n th.cuda.synchronize()\n ## ----------------- VLPT MODEL FORWARD PASS\n #with th.cuda.stream(s1):\n # PREDICT VLPT (input frames and paired language tokens). 
Get output VPT actions, and LM loss\n VLPT_pd_action, _, _, VPT_state, LM_state, _, Xattn1_state, Xattn2_state = policy.get_output_for_observations( # we still need LM state for proper LM inference\n ob_words=x_words,\n ob_frames=batch_frames,\n VPT_state=VPT_state,\n first=is_first_frame,\n LM_state=LM_state,\n LM_labels=None,\n Xattn1_state=Xattn1_state,\n Xattn2_state=Xattn2_state) #not training LM, dont need labels\n\n #with th.cuda.stream(s2):\n # # ----- get action KL-divergence to original VPT\n # with th.no_grad():\n # vanVPT_pd_action, _, _, vanVPT_state, _, _, _, _ = vanvpt_agent.policy.get_output_for_observations(\n # ob_words=None,\n # ob_frames=batch_frames, # give same input\n # VPT_state=vanVPT_state, # use separate mems since VLPT mems are polluted from vanillavpt by LM signal, so cannot be used by original VPT\n # first=is_first_frame)\n\n th.cuda.synchronize() \n\n\n\n # ------------------------- BACKWARD PASS \n # calculate loss\n KL_divergence = 0 #agent.policy.get_kl_of_action_dists(VLPT_pd_action, vanVPT_pd_action).mean() \n VLPT_pd_action['buttons'] = VLPT_pd_action['buttons'].view([BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n VLPT_pd_action['camera'] = VLPT_pd_action['camera'].view([BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n VPT_loss = -policy.get_logprob_of_action(VLPT_pd_action, action_labels) # th.zeros([1]) \n #since VPT loss starts higher than LM, we scale it to offset the difference so optimizer doesnt go hard against LM objective in beginning\n _loss = VPT_loss.mean() + KL_divergence\n _loss.backward()\n th.nn.utils.clip_grad_norm_(VPT_trainable_params, VPT_MAX_GRAD_NORM)\n optimizer.step()\n lr_schedule.step()\n policy.zero_grad(set_to_none=True)\n\n # Make sure we do not try to backprop through sequence in future iterations\n vanVPT_state = tree_map(lambda x: x.detach(), vanVPT_state)\n VPT_state = tree_map(lambda x: x.detach(), VPT_state)\n LM_state = tree_map(lambda x: x.detach(), LM_state)\n Xattn1_state = Xattn1_state.detach()\n Xattn2_state = Xattn2_state.detach()\n\n # save hidden states from these videos for next time they show up. print('save_hid..')\n save_hidden_states_VPT(video_group_id, VPT_state, saved_hidden_states_VPT)\n save_hidden_states_VPT(video_group_id, vanVPT_state, saved_hidden_states_vanVPT)\n save_hidden_states_LM(video_group_id, LM_state, saved_hidden_states_LM)\n save_hidden_states_Xattn(video_group_id, Xattn1_state, saved_hidden_states_Xattn1)\n save_hidden_states_Xattn(video_group_id, Xattn2_state, saved_hidden_states_Xattn2)\n \n th.cuda.empty_cache()\n\n\n # --- free up hidden states whose videos have ended (i.e. 
fix memory leak in original VPT github)\n for video in finished_videos:\n if video in saved_hidden_states_VPT:\n print(\"video ended:\",video,\" cleaning up hidden state...\")\n saved_hidden_states_VPT.pop(video)\n saved_hidden_states_LM.pop(video)\n\n\n\n print('BC: TRAIN BATCH DONE!', video_group_id, subseq_ids, batch_i, finished_videos, VPT_loss.mean().item())\n\n\n\n\n\n # ----- LOSS REPORTING\n os.chdir('/content/drive/MyDrive/_DISSERTATION/')\n loss = np.asarray([[VPT_loss.mean().item(), KL_divergence]])\n loss_sum = np.concatenate([loss_sum, loss],axis=0)\n gates_now = np.asarray([[ abs(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item()),\n abs(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())\n ]])\n gates=np.concatenate([gates,gates_now],axis=0)\n \n if batch_i%LOSS_REPORT_RATE==0:\n print('logging progress...')\n time_since_start = time.time() - start_time\n \n #plot loss\n plt.plot(loss_sum[:,0], color='blue')\n #plt.plot(loss_sum[:,1], color='black')\n try:\n os.remove('TRAINING/VPT_ONLY/loss_graph_.png')\n except:\n pass\n plt.savefig('TRAINING/VPT_ONLY/loss_graph_.png')\n plt.clf()\n plt.close()\n \n #plot gates\n plt.plot(gates[:,0], color='darkblue')\n plt.plot(gates[:,1], color='blue')\n try:\n os.remove('TRAINING/VPT_ONLY/gates_graph_.png')\n except:\n pass\n plt.savefig('TRAINING/VPT_ONLY/gates_graph_.png') #print\n plt.clf()\n plt.close()\n \n # record training progress - so that if it crashes part way through, we can re-try training and resume from the same spot (actually this implementation dumps the rest of the video and we start at the next one.)\n with open(TRAINING_PROGRESS, 'a') as progress_file:\n line=str(batch_i)+str(video_group_id)+str(subseq_ids)\n progress_file.write(line)\n line=str(\"Eval: Time:{0}, VPT_loss: {1}, \\nXattnGate:1{2}\".format(\n str(time_since_start),\n str(VPT_loss),\n str(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item())+','+str(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())\n ))\n with open('TRAINING/VPT_ONLY/training_log','a') as file:\n file.write(line+'\\n')\n\n\n\n\n\n \n # reset losses \n VPT_loss,KL_divergence=0,0\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef VPT_evaluate():\n \n \n with th.no_grad():\n global agent, eval_data_loader, policy, s2, vanvpt_agent\n \n # put VLPT into testing mode\n policy.eval()\n \n # we dont want to disrupt internal states of training during eval so we use fresh ones\n eval_current_video_group_id = 0\n eval_VPT_loss =0\n noise_eval_VPT_loss =0 \n \n eval_is_first_frame = th.zeros((EVAL_BATCH_SIZE, F_SEQ_LEN), dtype=th.bool).to(DEVICE)\n num_batch=0\n for batch_i, (eval_video_group_id, eval_subseq_ids, eval_batch_frames, eval_batch_words, eval_batch_actions, _) in enumerate(eval_data_loader):\n num_batch+=1\n if eval_video_group_id != eval_current_video_group_id:\n eval_current_video_group_id = eval_video_group_id\n\n eval_VPT_state = policy.initial_state(EVAL_BATCH_SIZE)\n eval_LM_state = None\n noise_eval_VPT_state = policy.initial_state(EVAL_BATCH_SIZE)\n noise_eval_LM_state=None\n \n eval_Xattn1_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+F_SEQ_LEN,VPT_WIDTH], dtype=DTYPE).to(DEVICE)\n eval_Xattn2_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+L_SEQ_LEN,1024], dtype=DTYPE).to(DEVICE)\n \n noise_eval_Xattn1_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+F_SEQ_LEN,VPT_WIDTH], dtype=DTYPE).to(DEVICE)\n noise_eval_Xattn2_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+L_SEQ_LEN,1024], dtype=DTYPE).to(DEVICE)\n\n ### ------------- format input from data loader to agent \n # format input 
frames\n eval_batch_frames['img'] = th.from_numpy(eval_batch_frames['img']).to(DTYPE).to(DEVICE)\n # format input/label words\n x_words, y_words = eval_batch_words['input_ids'], eval_batch_words['labels']\n x_words=th.from_numpy(x_words).to(DEVICE)\n \n noised_x_words = x_words.clone()\n noised_x_words = th.roll(noised_x_words, 1, 0)\n\n eval_batch_actions['camera'] = eval_batch_actions['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN,2])\n eval_batch_actions['buttons'] = eval_batch_actions['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN,20])\n eval_action_labels = agent._IDM_action_to_env(eval_batch_actions)\n #print('\\n\\n',action_labels)\n eval_action_labels = agent._env_action_to_agent(eval_action_labels, to_torch=True, check_if_null=True)\n\n\n\n ### ----------- FORWARD VLPT batch with the words swapped around compared to teh frames in (terms of samples index in teh batch). this tells us how much the predicted actions for teh episode are conditioned on the words for that episode\n th.cuda.empty_cache()\n th.cuda.synchronize()\n with th.cuda.stream(s2):\n noise_eval_VLPT_pd_action, _, _, noise_eval_VPT_state, noise_eval_LM_state, _, noise_eval_Xattn1_hidden_state, noise_eval_Xattn2_hidden_state = policy.get_output_for_observations(\n ob_words=noised_x_words,\n ob_frames=eval_batch_frames,\n VPT_state=noise_eval_VPT_state,\n LM_state=noise_eval_LM_state,\n LM_labels=None,\n first=eval_is_first_frame.clone(),\n Xattn1_state=noise_eval_Xattn1_hidden_state,\n Xattn2_state=noise_eval_Xattn2_hidden_state)\n\n ### ----------- FORWARD normal VLPT batch\n with th.cuda.stream(s1):\n eval_VLPT_pd_action, _, _, eval_VPT_state, eval_LM_state, _, eval_Xattn1_hidden_state, eval_Xattn2_hidden_state = policy.get_output_for_observations(\n ob_words=x_words,\n ob_frames=eval_batch_frames,\n VPT_state=eval_VPT_state,\n LM_state=eval_LM_state,\n LM_labels=None,\n first=eval_is_first_frame.clone(),\n Xattn1_state=eval_Xattn1_hidden_state,\n Xattn2_state=eval_Xattn2_hidden_state)\n th.cuda.synchronize()\n th.cuda.empty_cache()\n\n\n eval_VLPT_pd_action['buttons'] = eval_VLPT_pd_action['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n eval_VLPT_pd_action['camera'] = eval_VLPT_pd_action['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n\n noise_eval_VLPT_pd_action['buttons'] = noise_eval_VLPT_pd_action['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n noise_eval_VLPT_pd_action['camera'] = noise_eval_VLPT_pd_action['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n\n \n # calculate loss\n eloss = -policy.get_logprob_of_action(eval_VLPT_pd_action, eval_action_labels).mean().item()\n eval_VPT_loss += eloss \n noise_eval_VPT_loss += -policy.get_logprob_of_action( noise_eval_VLPT_pd_action, eval_action_labels).mean().item()\n print(\"BC_EVAL: batch done!\", eval_video_group_id, eval_subseq_ids, eloss)\n\n \n eval_VPT_loss /= num_batch\n noise_eval_VPT_loss /= num_batch\n\n # return VLPT to training mode ## enable dropout for apporopriate layers\n policy.net.eval()\n policy.net.LM.transformer.layers[-1].pos_ff.CoreNet[-1].train() # enable dropout between LM and Xattn2\n policy.net.Xattn_LM_VPT.train() # enable dropout inside xattn2 and before VPT transformer layers 2-4\n for i in [1,2,3]:\n policy.net.recurrent_layer.blocks[i].train() # add dropout to all VPT layers beign trained (2-4)\n policy.net.lastlayer.train()\n policy.net.final_ln.train() # these last two dont even have dropout but i believe in magic\n\n print(eval_VPT_loss, noise_eval_VPT_loss)\n return eval_VPT_loss, 
noise_eval_VPT_loss\n \n\n # agent estimate 10 video sequence batches of 512 with same tgt_len and mem_len\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n VPT_train()", "repo_name": "GaPaLa/VLPT", "sub_path": "VLPT/TRAIN_behavioural_cloning.py", "file_name": "TRAIN_behavioural_cloning.py", "file_ext": "py", "file_size_in_byte": 35317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.bfloat16", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 174, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 290, "usage_type": "attribute"}, {"api_name": "torch.cuda.Stream", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 296, "usage_type": "attribute"}, {"api_name": "torch.cuda.Stream", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 297, "usage_type": "attribute"}, {"api_name": "IDM_data_loader_np256.DataLoader", "line_number": 305, "usage_type": "call"}, {"api_name": "agent.MineRLAgent", "line_number": 323, "usage_type": "call"}, {"api_name": "agent.load_weights", "line_number": 326, "usage_type": "call"}, {"api_name": "agent.load_weights", "line_number": 327, "usage_type": "call"}, {"api_name": "agent.load_weights", "line_number": 328, "usage_type": "call"}, {"api_name": "agent.policy", "line_number": 331, "usage_type": "attribute"}, {"api_name": "agent.MineRLAgent", "line_number": 367, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 379, "usage_type": "attribute"}, {"api_name": "cosine_annealing_warmup.CosineAnnealingWarmupRestarts", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 401, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 406, "usage_type": "call"}, {"api_name": "IDM_data_loader_np256.DataLoader", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 433, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 433, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 435, "usage_type": "call"}, {"api_name": 
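# Aside: the 'noised' evaluation in the record above builds its control
# condition by rotating the word batch one position along the batch axis, so
# every episode is paired with another episode's transcript. A tiny sketch of
# that th.roll trick:
import torch as th

words = th.tensor([[10, 11], [20, 21], [30, 31]])  # [batch, seq]
mismatched = th.roll(words, 1, 0)  # shift by 1 along dim 0, the batch axis
print(mismatched.tolist())  # [[30, 31], [10, 11], [20, 21]]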
"matplotlib.pyplot.savefig", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 438, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 439, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 439, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 443, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 443, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 445, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 448, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 448, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 449, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 449, "usage_type": "name"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_xattn.item", "line_number": 457, "usage_type": "call"}, {"api_name": "agent.policy", "line_number": 457, "usage_type": "attribute"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_dense.item", "line_number": 457, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 474, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 477, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 478, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 502, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 503, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 505, "usage_type": "call"}, {"api_name": "agent._IDM_action_to_env", "line_number": 511, "usage_type": "call"}, {"api_name": "agent._env_action_to_agent", "line_number": 513, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 531, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 531, "usage_type": "attribute"}, {"api_name": "torch.cuda.synchronize", "line_number": 554, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 554, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 567, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 567, "usage_type": "attribute"}, {"api_name": "lib.tree_util.tree_map", "line_number": 573, "usage_type": "call"}, {"api_name": "lib.tree_util.tree_map", "line_number": 574, "usage_type": "call"}, {"api_name": "lib.tree_util.tree_map", "line_number": 575, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 586, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 586, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 605, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 606, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 607, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 608, "usage_type": "call"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_xattn.item", "line_number": 608, "usage_type": "call"}, {"api_name": "agent.policy", "line_number": 608, "usage_type": "attribute"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_dense.item", "line_number": 609, "usage_type": "call"}, {"api_name": "agent.policy", "line_number": 609, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 611, "usage_type": "call"}, {"api_name": "time.time", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 618, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 618, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 621, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 624, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 624, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 625, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 625, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 626, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 626, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 630, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 630, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 632, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 635, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 635, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 636, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 636, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 637, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 637, "usage_type": "name"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_xattn.item", "line_number": 646, "usage_type": "call"}, {"api_name": "agent.policy", "line_number": 646, "usage_type": "attribute"}, {"api_name": "agent.policy.net.Xattn_LM_VPT.alpha_dense.item", "line_number": 646, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 674, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 685, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 685, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 697, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 698, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 700, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 701, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 705, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 708, "usage_type": "call"}, {"api_name": "torch.roll", "line_number": 711, "usage_type": "call"}, {"api_name": "agent._IDM_action_to_env", "line_number": 715, "usage_type": "call"}, {"api_name": "agent._env_action_to_agent", "line_number": 717, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 722, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 722, "usage_type": "attribute"}, {"api_name": "torch.cuda.synchronize", "line_number": 723, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 723, "usage_type": "attribute"}, {"api_name": "torch.cuda.stream", "line_number": 724, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 724, "usage_type": "attribute"}, {"api_name": "torch.cuda.stream", "line_number": 736, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 736, "usage_type": "attribute"}, {"api_name": "torch.cuda.synchronize", "line_number": 746, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 746, "usage_type": "attribute"}, {"api_name": "torch.cuda.empty_cache", "line_number": 747, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 747, 
"usage_type": "attribute"}]} +{"seq_id": "29449481307", "text": "# -*- coding: utf-8 -*-\n\nimport flask\nimport json\nimport route\nimport shutil\nimport datetime\nimport time\nimport os\nimport zipfile\n\nfrom sqlalchemy import and_\n\nfrom handler.log import api_logger\nfrom handler.config import appconfig\nfrom handler.pool import mysqlpool\nfrom handler.socket.deploy import single_deploy\n\nfrom model.mysql import model_mysql_planinfo\nfrom model.mysql import model_mysql_tablesnap\nfrom model.mysql import model_mysql_taskinfo\nfrom model.mysql import model_mysql_userinfo\nfrom model.redis import modle_redis_apitestplanworktable\n\n\"\"\"\n 新增正式测试任务创建接口,此任务仅允许本人创建\n 支持创建接口自动化测试以及接口性能测试任务\n ----校验\n 校验账户是否存在\n 校验账户操作令牌\n 校验账户所属角色是否有API操作权限\n 校验传参\n ----操作\n 检查测试计划以及测试版本是否存在且该测试版本是否为临时版本\n 新增调试任务\n 将测试任务数据打包发送给执行应用\n\"\"\"\n\n\n@route.check_token\n@route.check_user\n# @route.check_auth\n@route.check_post_parameter(\n ['planId', int, 1, None],\n ['description', str, None, 200],\n ['startType', int, 1, 2],\n ['runType', int, 1, 2]\n)\ndef task_post():\n # 初始化返回内容\n response_json = {\n \"code\": 200,\n \"msg\": \"操作成功\",\n \"data\": {}\n }\n\n # 取出数据\n # header\n user_id = flask.request.headers['UserId']\n # body\n plan_id = flask.request.json['planId']\n description = flask.request.json['description']\n start_type = flask.request.json['startType']\n run_type = flask.request.json['runType']\n\n # 如果startType为2则需要检查执行时间\n datetime_start_time = None\n datetime_end_time = None\n if start_type == 2:\n\n # 开始时间检查\n if 'startTime' not in flask.request.json:\n api_logger.debug(\"传参缺少startTime\")\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['startTime']) is not int:\n api_logger.debug(\"传参startTime类型错误\")\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['startTime'] < int(time.time()):\n api_logger.debug(\"传参startTime大小错误\")\n return route.error_msgs[201]['msg_too_early']\n # 结束时间检查\n if 'endTime' not in flask.request.json:\n api_logger.debug(\"传参缺少endTime\")\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['endTime']) is not int:\n api_logger.debug(\"传参endTime类型错误\")\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['endTime'] < flask.request.json['startTime'] + 10:\n api_logger.debug(\"传参endTime大小错误\")\n return route.error_msgs[201]['msg_task_time_error']\n try:\n datetime_start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(flask.request.json['startTime']))\n datetime_end_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(flask.request.json['endTime']))\n except:\n return route.error_msgs[201]['msg_data_error']\n # 如果runType为1则需要检查执行次数\n times = None\n if run_type == 1:\n print(1111111)\n if 'times' not in flask.request.json:\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['times']) is not int:\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['times'] < 1:\n return route.error_msgs[201]['msg_data_error']\n times = flask.request.json['times']\n\n # 为了将来能够看日志,必须要有不变的快照数据,所以tableSnap的不靠谱\n # 尝试于redis读取工作台快照临时数据\n # 如果有,以这些内容发起测试任务\n # 如果无,则读取mysql中最新的内容,发起测试任务\n tablesnap_data = None\n redis_get_table_bytes = modle_redis_apitestplanworktable.query_table(plan_id)\n if redis_get_table_bytes is not None:\n tablesnap_data = redis_get_table_bytes.decode('utf-8')\n else:\n # 根���planId去查询工作台快照内容\n try:\n mysql_tablesnap = 
    # To be able to review logs later there must be immutable snapshot data, so relying on tableSnap alone is not dependable\n    # Try to read the temporary worktable snapshot data from redis\n    # If present, start the test task with that content\n    # If absent, read the latest content from mysql and start the test task\n    tablesnap_data = None\n    redis_get_table_bytes = modle_redis_apitestplanworktable.query_table(plan_id)\n    if redis_get_table_bytes is not None:\n        tablesnap_data = redis_get_table_bytes.decode('utf-8')\n    else:\n        # Query the worktable snapshot content by planId\n        try:\n            mysql_tablesnap = model_mysql_tablesnap.query.filter(\n                and_(\n                    model_mysql_tablesnap.planId == plan_id,\n                    model_mysql_tablesnap.status == 1\n                )\n            ).first()\n            api_logger.debug(\"API test plan worktable snapshot content lookup succeeded\")\n        except Exception as e:\n            api_logger.debug(\"API test plan worktable snapshot content lookup failed, reason: \" + repr(e))\n            return route.error_msgs[500]['msg_db_error']\n        else:\n            # If a record exists and it is a formal version, continue; otherwise return an error message\n            # Abnormal data must be excluded\n            if not mysql_tablesnap:\n                return route.error_msgs[201]['msg_no_data']\n            else:\n                tablesnap_data = mysql_tablesnap.table\n\n    # Create the new test task record\n    # 1. Prepare the base data of the test task\n    new_task_info = model_mysql_taskinfo(\n        planId=plan_id,\n        snap=tablesnap_data,\n        taskType=1,\n        startType=start_type,\n        endType=run_type,\n        taskDescription=description,\n        createTime=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n        createUser=user_id\n    )\n    # Fields still unfilled: startTime/endTime/excuteTimes/if_error/vUser/rampUpPeriod\n    # startTime/endTime\n    if start_type == 1:\n        new_task_info.startTime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n    elif start_type == 2:\n        new_task_info.startTime = datetime_start_time\n        new_task_info.endTime = datetime_end_time\n    # excuteTimes\n    if run_type == 1:\n        new_task_info.excuteTimes = times\n    # if_error\n    # Customization not supported yet\n    new_task_info.errorType = 1\n    # rampUpPeriod\n    # Customization not supported yet\n    new_task_info.rampUpPeriod = 0\n    # vUser\n    if 'userNum' in flask.request.json:\n        normal_v_user = flask.request.json['userNum']\n        if type(normal_v_user) is int and normal_v_user in range(1, 1001):\n            new_task_info.vUser = normal_v_user\n        else:\n            return route.error_msgs[301]['msg_value_type_error']\n    else:\n        return route.error_msgs[302]['msg_request_params_incomplete']\n    # Insert the new test task record\n\n    try:\n        mysqlpool.session.add(new_task_info)\n        mysqlpool.session.commit()\n    except Exception as e:\n        api_logger.error(\"Failed to create the test task, reason: \" + repr(e))\n        return route.error_msgs[500]['msg_db_error']\n    else:\n        api_logger.debug(\"Test task created successfully\")\n\n    # Prepare the test task file to be sent\n    # Save the worktable content as a task.json file\n    # Package the test task data\n    # Check the file storage path\n    if not os.path.exists('file/'):\n        api_logger.debug('The main file directory for test task files does not exist, trying to create it...')\n        try:\n            os.makedirs('file/')\n\n        except Exception as e:\n            api_logger.error('Failed to create the file directory for test task files, reason: ' + repr(e))\n            return route.error_msgs[500]['msg_file_error']\n        else:\n\n            api_logger.debug('The file directory for test task files was created successfully')\n    the_now = datetime.datetime.now()\n    the_year = str(the_now.year)\n    the_month = str(the_now.month)\n    the_day = str(the_now.day)\n    if not os.path.exists('file/' + the_year):\n        api_logger.debug('The year directory does not exist, trying to create it...')\n        try:\n            os.makedirs('file/' + the_year)\n        except Exception as e:\n            api_logger.error('Failed to create the year directory, reason: ' + repr(e))\n            return route.error_msgs[500]['msg_file_error']\n        else:\n            api_logger.debug('The year directory was created successfully')\n    if not os.path.exists('file/' + the_year + '/' + the_month):\n        api_logger.debug('The month directory does not exist, trying to create it...')\n        try:\n            os.makedirs('file/' + the_year + '/' + the_month)\n        except Exception as e:\n            api_logger.error('Failed to create the month directory, reason: ' + repr(e))\n            return route.error_msgs[500]['msg_file_error']\n        else:\n            api_logger.debug('The month directory was created successfully')\n    if not os.path.exists('file/' + the_year + '/' + the_month + '/' + the_day):\n        api_logger.debug('The day directory does not exist, trying to create it...')\n        try:\n            os.makedirs('file/' + the_year + '/' + the_month + '/' + the_day)\n        except Exception as e:\n            api_logger.error('Failed to create the day directory, reason: ' + repr(e))\n            return route.error_msgs[500]['msg_file_error']\n        else:\n            api_logger.debug('The day directory was created successfully')\n
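    # NOTE (added): on Python >= 3.2 the year/month/day cascade above could be collapsed\n    # into a single call, e.g. os.makedirs(os.path.join('file', the_year, the_month, the_day), exist_ok=True).\n    # The step-by-step version is kept so that each level can be logged separately.\n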
    dir_path = 'file/' + the_year + '/' + the_month + '/' + the_day\n    task_dir_path = dir_path + '/task_%s_%s' % (\n        str(new_task_info.taskId),\n        the_now.strftime('%Y%m%d%H%M%S')\n    )\n    api_logger.debug('Trying to create the target directory for the test task...')\n    try:\n        # Create the year/month/day/task_taskId_timestamp folder under the file directory\n        os.makedirs(task_dir_path)\n        # Copy the project folder (which holds the parameterization files) into the task folder\n        resource_path = appconfig.get(\"task\", \"filePutDir\")\n        resource_path = resource_path[:-1] if resource_path[-1] == \"/\" else resource_path\n        resource_path = \"%s/%s\" % (resource_path, plan_id)\n        # Based on the path from the config file, check whether the test plan folder exists\n        if os.path.exists(resource_path) is False or os.path.isdir(resource_path) is False:\n            os.makedirs(task_dir_path + '/files')\n        else:\n            shutil.copytree(resource_path, task_dir_path + '/files')\n    except Exception as e:\n        api_logger.error('Failed to create the target directory for the test task, reason: ' + repr(e))\n        return route.error_msgs[500]['msg_file_error']\n    else:\n        api_logger.debug('The target directory for the test task was created successfully')\n        # Save the test task data as a json file\n        file = open(task_dir_path + '/task.json', 'w', encoding='utf-8')\n        file.write(tablesnap_data)\n        file.close()\n        # Compress the whole folder into a zip archive\n        z = zipfile.ZipFile(task_dir_path + '.zip', 'w', zipfile.ZIP_DEFLATED)\n        # Add task.json and the files folder to the archive\n        z.write(os.path.join(task_dir_path, 'task.json'), 'task.json')\n        z.write(os.path.join(task_dir_path, 'files'), 'files')\n        # Add every file under the files folder to the archive\n        for dir_path, dir_names, file_names in os.walk(os.path.join(task_dir_path, 'files')):\n            for fn in file_names:\n                if fn not in z.NameToInfo:\n                    z.write(os.path.join(dir_path, fn), os.path.join('files', fn))\n        z.close()\n\n    # Query the plan type\n    try:\n        mysql_planinfo = model_mysql_planinfo.query.filter(\n            model_mysql_planinfo.planId == plan_id\n        ).first()\n    except Exception as e:\n        api_logger.debug(\"Failed to read model_mysql_planinfo data, reason: \" + repr(e))\n        return route.error_msgs[500]['msg_db_error']\n    else:\n        # Dispatch the test task according to the plan type\n        if mysql_planinfo.planType == 1:\n            deploy_result, deploy_msg = single_deploy(\n                base=new_task_info,\n                file=task_dir_path + '.zip'\n            )\n            if not deploy_result:\n                print(7777)\n                response_json['code'] = 500\n                response_json['error_msg'] = 'Failed to dispatch the test task, reason: %s, please contact the administrator or start the test task again later' % deploy_msg\n                return json.dumps(response_json)\n\n    return response_json\n", "repo_name": "erikshe2003/qaplatform_api", "sub_path": "route/api/task/restful_task/post/v1_0_0.py", "file_name": "v1_0_0.py", "file_ext": "py", "file_size_in_byte": 12044, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.request", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 73, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 73, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 76, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "handler.log.api_logger.debug", "line_number": 79, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 79, 
"usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 83, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 83, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 86, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 86, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 89, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 90, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 92, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 93, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "attribute"}, {"api_name": "model.redis.modle_redis_apitestplanworktable.query_table", "line_number": 113, "usage_type": "call"}, {"api_name": "model.redis.modle_redis_apitestplanworktable", "line_number": 113, "usage_type": "name"}, {"api_name": "model.mysql.model_mysql_tablesnap.query.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "model.mysql.model_mysql_tablesnap.query", "line_number": 119, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_tablesnap", "line_number": 119, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 120, "usage_type": "call"}, {"api_name": "model.mysql.model_mysql_tablesnap.planId", "line_number": 121, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_tablesnap", "line_number": 121, "usage_type": "name"}, {"api_name": "model.mysql.model_mysql_tablesnap.status", "line_number": 122, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_tablesnap", "line_number": 122, "usage_type": "name"}, {"api_name": "handler.log.api_logger.debug", "line_number": 125, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 125, "usage_type": "name"}, {"api_name": "handler.log.api_logger.debug", "line_number": 127, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 127, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 128, 
"usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 133, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_taskinfo", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 171, "usage_type": "attribute"}, {"api_name": "route.error_msgs", "line_number": 173, "usage_type": "attribute"}, {"api_name": "handler.pool.mysqlpool.session.add", "line_number": 177, "usage_type": "call"}, {"api_name": "handler.pool.mysqlpool.session", "line_number": 177, "usage_type": "attribute"}, {"api_name": "handler.pool.mysqlpool", "line_number": 177, "usage_type": "name"}, {"api_name": "handler.pool.mysqlpool.session.commit", "line_number": 178, "usage_type": "call"}, {"api_name": "handler.pool.mysqlpool.session", "line_number": 178, "usage_type": "attribute"}, {"api_name": "handler.pool.mysqlpool", "line_number": 178, "usage_type": "name"}, {"api_name": "handler.log.api_logger.error", "line_number": 180, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 180, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 181, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 183, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 183, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 190, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 190, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 192, "usage_type": "call"}, {"api_name": "handler.log.api_logger.error", "line_number": 195, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 195, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 196, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 199, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 199, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 205, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 205, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 207, "usage_type": "call"}, {"api_name": "handler.log.api_logger.error", "line_number": 209, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 209, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 210, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 212, "usage_type": "call"}, {"api_name": "handler.log.api_logger", 
"line_number": 212, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 213, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 214, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 214, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 216, "usage_type": "call"}, {"api_name": "handler.log.api_logger.error", "line_number": 218, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 218, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 219, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 221, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 221, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 223, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 223, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 225, "usage_type": "call"}, {"api_name": "handler.log.api_logger.error", "line_number": 227, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 227, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 228, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 230, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 230, "usage_type": "name"}, {"api_name": "handler.log.api_logger.debug", "line_number": 236, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 236, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 239, "usage_type": "call"}, {"api_name": "handler.config.appconfig.get", "line_number": 241, "usage_type": "call"}, {"api_name": "handler.config.appconfig", "line_number": 241, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 245, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 246, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 248, "usage_type": "call"}, {"api_name": "handler.log.api_logger.error", "line_number": 250, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 250, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 251, "usage_type": "attribute"}, {"api_name": "handler.log.api_logger.debug", "line_number": 253, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 253, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 259, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_planinfo.query.filter", "line_number": 272, "usage_type": "call"}, {"api_name": "model.mysql.model_mysql_planinfo.query", "line_number": 272, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_planinfo", "line_number": 272, "usage_type": "name"}, {"api_name": "model.mysql.model_mysql_planinfo.planId", "line_number": 273, "usage_type": "attribute"}, {"api_name": "model.mysql.model_mysql_planinfo", "line_number": 273, "usage_type": "name"}, {"api_name": "handler.log.api_logger.debug", "line_number": 276, "usage_type": "call"}, {"api_name": "handler.log.api_logger", "line_number": 276, "usage_type": "name"}, {"api_name": "route.error_msgs", "line_number": 277, "usage_type": "attribute"}, {"api_name": "handler.socket.deploy.single_deploy", "line_number": 281, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 289, "usage_type": "call"}, {"api_name": "route.check_token", "line_number": 40, "usage_type": "attribute"}, {"api_name": "route.check_user", "line_number": 41, "usage_type": "attribute"}, {"api_name": "route.check_post_parameter", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "15964740024", "text": "#!/usr/bin/env python3\n\nimport conf\n\nimport argparse\nimport time\n\nimport requests\nimport tabulate\n\nCLEAR = \"\\x1b[2J\\x1b[1;1H\"\n\n\ndef get(path):\n url = \"http://localhost:{}\".format(conf.CHIEF_API_PORT)\n r = requests.get(url + path)\n return r.json()\n\n\ndef table(info, *a, **ka):\n if 'tablefmt' not in ka:\n ka['tablefmt'] = \"fancy_grid\"\n\n if type(info) == dict:\n info = [list(i) for i in info.items()]\n return tabulate.tabulate(info, [], *a, **ka)\n elif type(info) == list and type(info[0] == dict):\n headers = sorted(info[0].keys())\n values = []\n for e in info:\n values.append([e.get(k, '') for k in headers])\n return tabulate.tabulate(values, headers, *a, **ka)\n\n return tabulate.tabulate(info, *a, **ka)\n\n\ndef do(args):\n now = time.strftime(\"%h %d, %H:%M\")\n if args.cmd == \"jobs\":\n print(now + '\\n' + table(get(\"/jobs/list\")))\n elif args.cmd == \"run\":\n assert args.job, \"--job required\"\n url = \"/jobs/run/{}\".format(args.job)\n print(now + '\\n' + table(get(url)))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"cmd\", choices=[\"jobs\", \"run\", \"ping\"])\n parser.add_argument(\"--auto\", type=int)\n parser.add_argument(\"--job\")\n args = parser.parse_args()\n\n try:\n do(args)\n while args.auto:\n time.sleep(args.auto)\n print(CLEAR)\n try:\n do(args)\n except requests.exceptions.ConnectionError:\n now = time.strftime(\"%h %d, %H:%M\")\n print(now + \" - [ERROR] Autome API server not reachable\")\n except KeyboardInterrupt:\n pass\n\nif __name__ == '__main__':\n main()\n", "repo_name": "alobbs/autome", "sub_path": "chief/chief-client.py", "file_name": "chief-client.py", "file_ext": "py", "file_size_in_byte": 1722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "conf.CHIEF_API_PORT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 26, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 32, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 34, "usage_type": "call"}, {"api_name": 
"time.strftime", "line_number": 38, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "13350823984", "text": "import sys\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\n\ndef hexdev_394():\n path = pyunit_utils.locate(\"smalldata/covtype/covtype.20k.data\")\n c_types = [None] * 55\n c_types[10] = \"enum\"\n c_types[11] = \"enum\"\n c_types[12] = \"enum\"\n train = h2o.import_file(path, col_types=c_types)\n\n cols = train.col_names # This returned space for first column name\n x_cols = [colname for colname in cols if colname != \"C55\"]\n\n splits = train.split_frame()\n newtrain = splits[0]\n newvalid = splits[1]\n newtrain[54] = newtrain[54].asfactor()\n newvalid[54] = newvalid[54].asfactor()\n\n\n my_gbm = H2OGradientBoostingEstimator(distribution=\"multinomial\", ntrees=100, learn_rate=0.1, max_depth=6)\n my_gbm.train(x=x_cols,y=54,training_frame=newtrain, validation_frame=newvalid)\n\n split1, split2 = train.split_frame()\n split1[54] = split1[54].asfactor()\n split2[54] = split2[54].asfactor()\n\n my_gbm = H2OGradientBoostingEstimator(distribution=\"multinomial\",\n ntrees=100,\n learn_rate=0.1,\n max_depth=6)\n my_gbm.train(x=x_cols,y=54,training_frame=split1,validation_frame=split2)\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(hexdev_394)\nelse:\n hexdev_394()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-py/tests/testdir_misc/pyunit_HEXDEV_394.py", "file_name": "pyunit_HEXDEV_394.py", "file_ext": "py", "file_size_in_byte": 1373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 9, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 9, "usage_type": "name"}, {"api_name": "h2o.import_file", "line_number": 14, "usage_type": "call"}, {"api_name": "h2o.estimators.gbm.H2OGradientBoostingEstimator", "line_number": 26, "usage_type": "call"}, {"api_name": "h2o.estimators.gbm.H2OGradientBoostingEstimator", "line_number": 33, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 41, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "17620583033", "text": "from zope import interface, component\nfrom Products.Five.viewlet.manager import ViewletManagerBase\nfrom Products.ZenUtils.jsonutils import json\nfrom Products.Five.viewlet import viewlet\nfrom interfaces import ISecurityManager, IPermissionsDeclarationViewlet\nfrom AccessControl import getSecurityManager\nfrom Products.ZenUtils.guid.interfaces import IGlobalIdentifier\nfrom Products.Zuul.interfaces import IAuthorizationTool\nfrom collective.beaker.interfaces import ISession\n\nZAUTH_COOKIE = 'ZAuthToken'\n\nclass SecurityManager(ViewletManagerBase):\n \"\"\"The Viewlet manager class for the permissions declaration\n \"\"\"\n interface.implements(ISecurityManager)\n\n\ndef permissionsForContext(context):\n \"\"\"\n Given a context (zope object) returns all 
the permissions\n the logged in user has.\n \"\"\"\n manager = getSecurityManager()\n all_permissions = context.zport.acl_users.possible_permissions()\n\n # filter out the ones we have in this context\n valid_permissions = [permission for permission in all_permissions\n if manager.checkPermission(permission, context)]\n\n # turn the list into a dictionary to make it easier to look up on\n # the client side (just look up the key instead of iterating)\n perms = {}\n for permission in valid_permissions:\n perms[permission.lower()] = True\n return perms\n\nclass PermissionsDeclaration(viewlet.ViewletBase):\n \"\"\"This is responsible for sending to the client side\n which permissions the user has\n \"\"\"\n interface.implements(IPermissionsDeclarationViewlet)\n\n def render(self):\n \"\"\"Creates a global function in JavaScript that returns the\n json encoding of all the permissions available to the current\n user in the current context. The permissions will be in the\n form of a dictionary.\n \"\"\"\n self._setAuthorizationCookie()\n permissions = self.permissionsForCurrentContext()\n managedObjectGuids = self.getManagedObjectGuids(returnChildrenForRootObj=True)\n data = json(permissions)\n func = \"\"\"\n\n \"\"\" % (data, json(managedObjectGuids), str(self.hasGlobalRoles()).lower())\n return func\n\n def _setAuthorizationCookie(self):\n session = ISession(self.context.REQUEST)\n authorization = IAuthorizationTool(self.context)\n token = authorization.createAuthToken(self.request)\n\n self.request.response.setCookie(ZAUTH_COOKIE, token['id'], path=\"/\", secure=session.secure, http_only=True)\n\n def hasGlobalRoles(self):\n \"\"\"\n @return True/False if the user has global roles\n \"\"\"\n us = self.context.dmd.ZenUsers.getUserSettings()\n return not us.hasNoGlobalRoles()\n\n def permissionsForCurrentContext(self):\n \"\"\"Given a context return a list of all the permissions the logged in\n user has.\n \"\"\"\n return permissionsForContext(self.context)\n\n def getManagedObjectGuids(self, returnChildrenForRootObj=False):\n \"\"\"\n If the currently logged in user is a restricted user this will return\n all of the guids for items he can administer.\n \"\"\"\n guids = []\n us = self.context.dmd.ZenUsers.getUserSettings()\n if us.hasNoGlobalRoles():\n guids = us.getAllAdminGuids(returnChildrenForRootObj=returnChildrenForRootObj)\n return guids\n", "repo_name": "zenoss/zenoss-prodbin", "sub_path": "Products/ZenUI3/security/security.py", "file_name": "security.py", "file_ext": "py", "file_size_in_byte": 3583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Products.Five.viewlet.manager.ViewletManagerBase", "line_number": 13, "usage_type": "name"}, {"api_name": "zope.interface.implements", "line_number": 16, "usage_type": "call"}, {"api_name": "interfaces.ISecurityManager", "line_number": 16, "usage_type": "argument"}, {"api_name": "zope.interface", "line_number": 16, "usage_type": "name"}, {"api_name": "AccessControl.getSecurityManager", "line_number": 24, "usage_type": "call"}, {"api_name": "Products.Five.viewlet.viewlet.ViewletBase", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Products.Five.viewlet.viewlet", "line_number": 38, "usage_type": "name"}, {"api_name": "zope.interface.implements", "line_number": 42, "usage_type": "call"}, {"api_name": "interfaces.IPermissionsDeclarationViewlet", "line_number": 42, "usage_type": "argument"}, {"api_name": "zope.interface", "line_number": 42, 
"usage_type": "name"}, {"api_name": "Products.ZenUtils.jsonutils.json", "line_number": 53, "usage_type": "call"}, {"api_name": "Products.ZenUtils.jsonutils.json", "line_number": 68, "usage_type": "call"}, {"api_name": "collective.beaker.interfaces.ISession", "line_number": 72, "usage_type": "call"}, {"api_name": "Products.Zuul.interfaces.IAuthorizationTool", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "71754970113", "text": "##########################################################################\n################################ SLITHER #################################\n##########################################################################\n\n'''\nWHAT IS THIS?\n Digital snakes that each have their own genetically evolved artificial intelligence. The snakes live in a\n virtual 2D world with confined resources/energy. Consuming energy increases the length of the snake at a\n one-to-one ratio through an extension of the length of snake (from its tail) in the next iteration of the world.\n The snakes must move head first in each iteration of the world. They can curl up/overlap on themselves,\n but if they move into a space occupied by another snake, they will die, and their body will be converted into\n energy at a one-to-one ratio. The rules of the world are inspired by slither.io.\n'''\n\nimport operator\nprint(dir(operator))\nimport numpy\nimport random\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport platform\nif platform.system() == 'Darwin':\n matplotlib.use('MacOSX')\nelse:\n matplotlib.use('TkAgg')\n\nimport copy\nimport traceback\nfrom time import sleep\n\nclass World():\n def __init__(self, size=100, initial_being_spawn_count=100, initial_energy_spawn_count=150):\n self.object_world = numpy.zeros(shape=(size,size), dtype=object)\n self.id_world = numpy.zeros(shape=(size,size))\n self.size = size\n self.Emperor_DNA = False # Longest Being ever to live in this World\n self.Emperor_length = 0\n self.King_DNA = False # Longest Being that is alive\n self.King_length = 0\n self.Queen_DNA = False # Second longest Being that is alive\n self.Queen_length = 0\n self.all_Beings = self.spawn_beings(initial_being_spawn_count)\n self.uneaten_Energy = self.spawn_energy(initial_energy_spawn_count)\n self.World_age = 0\n self.all_potential_locations = []\n\n class Energy(): # Not necessary to have Energy object at this point but adding so have it for later\n def __init__(self, location, energy_count=1):\n self.location = location # Nested lists with X, Y coordinates; len(Energy.location) always = 1\n self.energy_count = energy_count\n self.energy_id = 2\n # self.energy_id = self.energy_count + 1 # Can use thiis if want energy_count to be visible in id_world\n\n class Being():\n\n def __init__(self, _World, location, parent_DNA=False):\n self.location = location # Nested lists with X, Y coordinates\n self.head = location[0] # List with X, Y coordinates\n self.energy = 0\n self.age = 0\n random.seed()\n self.head_id = random.uniform(1.0, 1.25)\n self.body_id = random.uniform(1.26, 1.50)\n # There is the technical potential for colisions (eg,\n # two identical head_id)\n\n if parent_DNA != False:\n random.seed()\n random_index = random.choice(range(len(str(parent_DNA))))\n random_digit = random.choice(list('0123456799'))\n baby_DNA = int((str(parent_DNA)[:random_index] + random_digit + str(parent_DNA)[random_index + 1:]))\n self.DNA = baby_DNA\n # elif (_World.King_DNA != False) and (_World.Queen_DNA != False):\n # try:\n # baby_DNA = \"\"\n # for index in 
        def update_Being(self, _World):\n            self.age = self.age + 1\n            self.head = _World.choose_move(self)\n            # self.head = random.choice(potential_locations) # (UPDATE TO ALLOW FOR MORE SOPHISTICATED CHOICE/MUTATIONS LATER)\n            if self.head == None: # This is a temporary fix for a rare error where a Being has no neck for some reason.\n                pass\n            elif self.energy > 0:\n                new_location = [self.head]\n                for coordinates in self.location:\n                    new_location.append(coordinates)\n                self.location = new_location\n                self.energy = self.energy - 1\n            elif self.energy == 0:\n                new_location = [self.head]\n                for coordinates in self.location:\n                    new_location.append(coordinates)\n                del new_location[-1]\n                self.location = new_location\n            return_package = [self]\n            if ((self.age % 100) == 0) and (len(self.location) > 5):\n                baby_body = self.location.pop()\n                baby_head = self.location.pop()\n                baby_location = [baby_head, baby_body]\n                print(\"baby_location = \" + str(baby_location))\n                baby_Being = _World.Being(_World, baby_location, parent_DNA=self.DNA)\n                _World.all_Beings.append(baby_Being)\n                counter = 1\n                for coordinates in baby_location: # Think this is unnecessary b/c worlds will be recompiled from\n                                                  # all_Beings\n                    if counter == 1:\n                        _World.object_world[coordinates[0], coordinates[1]] = baby_Being\n                        _World.id_world[coordinates[0], coordinates[1]] = baby_Being.head_id\n                        counter = counter + 1\n                    else:\n                        _World.object_world[coordinates[0], coordinates[1]] = baby_Being.body_id\n                        _World.id_world[coordinates[0], coordinates[1]] = baby_Being.body_id\n                return_package.append(baby_Being)\n\n            return return_package\n\n    def update_World(self):\n        # Spawn Energy if total_energy has dropped below 350 (the World is somehow losing energy--this is just a patch\n        # until we figure out what's going on)\n        total_energy = sum([Energy.energy_count for Energy in self.uneaten_Energy] + \\\n                           [len(Being.location) for Being in self.all_Beings] + \\\n                           [Being.energy for Being in self.all_Beings])\n        # print(\"Total Energy: \" + str(total_energy))\n        # print(\"\\tUneaten Energy: \" + str(sum([Energy.energy_count for Energy in self.uneaten_Energy])))\n        # print(\"\\tSum of Being's Locations: \" + str(sum([len(Being.location) for Being in self.all_Beings])))\n        # print(\"\\tSum of Being's Unused Energy: \" + str(sum([Being.energy for Being in self.all_Beings])))\n        if total_energy < 350:\n            self.uneaten_Energy = self.uneaten_Energy + self.spawn_energy(350 - total_energy)\n
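        # NOTE (added): total_energy is the world's conservation bookkeeping -- uneaten\n        # Energy packets, plus one unit per occupied body block, plus each Being's banked\n        # (consumed but unspent) energy. After the top-up above, the intended invariant is\n        # assumed to be:\n        # assert sum(e.energy_count for e in self.uneaten_Energy) \\\n        #     + sum(len(b.location) for b in self.all_Beings) \\\n        #     + sum(b.energy for b in self.all_Beings) >= 350\n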
        # After specified interval, spawn new Beings if population running low and sufficient uneaten_Energy\n        if ((self.World_age % 51) == 0):\n            if len(self.all_Beings) < 50:\n                total_energy = sum([Energy.energy_count for Energy in self.uneaten_Energy] + \\\n                                   [len(Being.location) for Being in self.all_Beings] + \\\n                                   [Being.energy for Being in self.all_Beings])\n                if len(self.uneaten_Energy) > 50 - len(self.all_Beings) * 3:\n                    spawn_count = 50 - len(self.all_Beings)\n                else:\n                    spawn_count = int((len(self.uneaten_Energy) - 1) / 2)\n                spawned_Beings = self.spawn_beings(spawn_count)\n                self.all_Beings = self.all_Beings + spawned_Beings\n                for i in range(spawn_count * 2):\n                    random.seed()\n                    try:\n                        del self.uneaten_Energy[\n                            random.choice(range(len(self.uneaten_Energy)))] # Remove Energy to keep equilibrium\n                    except:\n                        pass\n\n        # Compile updated beings\n        updated_beings = []\n        copied_all_Beings = copy.deepcopy(self.all_Beings)\n\n        for being in copied_all_Beings:\n            # Not sure if have to do below or could just do updated_beings.append(being.update_Being())\n            return_package = being.update_Being(self)\n            for updated_being in return_package: # May include baby\n                updated_beings.append(updated_being)\n\n        # Create blank updated worlds\n        updated_object_world = numpy.zeros(shape=(self.size,self.size), dtype=object)\n        updated_id_world = numpy.zeros(shape=(self.size,self.size))\n\n        # Creating lists b/c think faster to iterate through them rather than full world\n        updated_being_heads = [updated_being.head for updated_being in updated_beings]\n        updated_being_bodies = [updated_being.location[1:] for updated_being in updated_beings]\n        uneaten_Energy_locations = [[Energy.location[0][0], Energy.location[0][1]] for Energy in self.uneaten_Energy]\n\n        # Incorporate Energy into the updated worlds and uneaten_Energy_locations\n        for Energy in self.uneaten_Energy:\n            updated_object_world[Energy.location[0][0], Energy.location[0][1]] = Energy\n            updated_id_world[Energy.location[0][0], Energy.location[0][1]] = Energy.energy_id\n\n        # Incorporate updated_beings to updated worlds\n        for updated_being in updated_beings:\n            # Shrink one block every 300 years\n            if ((updated_being.age % 300) == 0):\n                coordinates = updated_being.location.pop(-1) # Delete last block (tail)\n                released_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count=1)\n                updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n                self.uneaten_Energy.append(released_Energy)\n\n            # If beings' heads collided, both beings die in their updated locations (heads overlapping), with any unused\n            # energy of either being releasing where their heads overlapped\n            if updated_being_heads.count(updated_being.head) > 1:\n                for coordinates in updated_being.location:\n                    if coordinates == updated_being.head:\n                        if isinstance(updated_object_world[coordinates[0], coordinates[1]], self.Energy):\n                            updated_Energy = updated_object_world[coordinates[0], coordinates[1]]\n                            updated_Energy.energy_count = updated_Energy.energy_count + 1 + updated_being.energy\n                            updated_object_world[coordinates[0], coordinates[1]] = updated_Energy\n                        else:\n                            released_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n                                                          energy_count = 1 + updated_being.energy)\n                            updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                            updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n                    else:\n                        released_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n                        updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                        updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n
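            # NOTE (added): the branches of this chain are evaluated in priority order --\n            # head-to-head (both die), head-to-body (the mover dies), head-to-Energy (the\n            # mover eats), undersized for age (starves), otherwise the move just commits.\n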
            # If being collides with other being's body, being dies without updating location, with any unused\n            # energy releasing at its head\n            elif any(updated_being.head in location for location in updated_being_bodies) and \\\n                    (updated_being.location.count(updated_being.head) == 1):\n                for coordinates in updated_being.location:\n                    if coordinates == updated_being.head:\n                        released_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n                                                      energy_count=1 + updated_being.energy)\n                        updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                        updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n                    else:\n                        released_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n                        updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                        updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n            # If being collides with energy, the being consumes the energy and saves it for growing in subsequent\n            # round(s)\n            elif updated_being.head in uneaten_Energy_locations:\n                updated_being.energy = updated_being.energy + \\\n                                       updated_object_world[updated_being.head[0], updated_being.head[1]].energy_count\n                for coordinates in updated_being.location:\n                    if coordinates == updated_being.head:\n                        updated_object_world[coordinates[0], coordinates[1]] = updated_being\n                        updated_id_world[coordinates[0], coordinates[1]] = updated_being.head_id\n                    else:\n                        updated_object_world[coordinates[0], coordinates[1]] = updated_being.body_id\n                        updated_id_world[coordinates[0], coordinates[1]] = updated_being.body_id\n                uneaten_Energy_locations.remove(updated_being.head)\n            # If being too small for age, kill being\n            elif ((updated_being.age % 50) == 0) and len(updated_being.location) < (2 + (updated_being.age/50)):\n                for coordinates in updated_being.location:\n                    if coordinates == updated_being.head:\n                        released_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n                                                      energy_count=1 + updated_being.energy)\n                        updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                        updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n                    else:\n                        released_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n                        updated_object_world[coordinates[0], coordinates[1]] = released_Energy\n                        updated_id_world[coordinates[0], coordinates[1]] = released_Energy.energy_id\n                pass\n            # If the being collides with nothing, its location is updated\n            else:\n                for coordinates in updated_being.location:\n                    if coordinates == updated_being.head:\n                        updated_object_world[coordinates[0], coordinates[1]] = updated_being\n                        updated_id_world[coordinates[0], coordinates[1]] = updated_being.head_id\n                    else:\n                        updated_object_world[coordinates[0], coordinates[1]] = updated_being.body_id\n                        updated_id_world[coordinates[0], coordinates[1]] = updated_being.body_id\n\n        # Update world\n        self.object_world = updated_object_world\n        self.id_world = updated_id_world\n        updated_all_Beings = []\n        updated_uneaten_Energy = []\n        for coordinates, obj in numpy.ndenumerate(self.object_world): # Iterate through current object_world\n            if isinstance(obj, self.Being):\n                updated_all_Beings.append(obj)\n            if isinstance(obj, self.Energy):\n                updated_uneaten_Energy.append(obj)\n        self.all_Beings = updated_all_Beings\n        self.uneaten_Energy = updated_uneaten_Energy\n        for Being in self.all_Beings:\n            if 
len(Being.location) > self.Emperor_length:\n self.Emperor_DNA = Being.DNA\n self.Emperor_length = len(Being.location)\n if len(Being.location) > self.King_length:\n self.Queen_DNA = self.King_DNA\n self.Queen_length = self.King_length\n self.King_DNA = Being.DNA\n self.King_length = len(Being.location)\n elif len(Being.location) > self.Queen_length:\n self.Queen_DNA = Being.DNA\n self.Queen_length = len(Being.location)\n self.World_age = self.World_age + 1\n\n\n def choose_math_operation(self, DNA_strand):\n ops = [\n # Returning number\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.floordiv,\n # operator.pow, # This resulted in TypeError: can't convert complex to float\n operator.mod,\n # Returning boolean (can convert to number)\n # operator.lt,\n # operator.le,\n # operator.eq,\n # operator.gt,\n # operator.ge,\n # operator.ne,\n ]\n random.seed(DNA_strand)\n operation = random.choice(ops)\n return operation\n\n def processing_single_cell(_World, Being, cell, potential_locations, DNA):\n processing_results = []\n try:\n cell_id = _World.id_world[cell[0], cell[1]]\n except: # Cell isn't part of World grid\n cell_id = 0\n processing_results.append(cell_id)\n is_option = int(bool(str(cell_id) in str(potential_locations)))\n processing_results.append(is_option)\n diff_x = cell[0] - Being.head[0]\n processing_results.append(diff_x)\n diff_y = cell[1] - Being.head[0]\n processing_results.append(diff_y)\n '''\n for i in range(3):\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # for i in range (number_of_operations): # Implement this later\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n n = random.uniform(-1,1)\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n operation = _World.choose_math_operation(DNA)\n calc_result = operation(cell_id, n)\n processing_results.append(calc_result)\n '''\n return [processing_results, DNA]\n\n\n def compile_potential_locations(self):\n all_potential_locations = []\n for Being in self.all_Beings:\n try: # Not sure why this runs into following error sometimes: neckX = Being.location[1][0] --> IndexError: list index out of range\n headX = Being.head[0]\n headY = Being.head[1]\n neckX = Being.location[1][0]\n neckY = Being.location[1][1]\n if headX == neckX: # Traveling vertically\n if (headY - neckY) == 1: # Traveling north\n forward = [headX, headY + 1]\n left = [headX - 1, headY]\n right = [headX + 1, headY]\n elif (headY - neckY) == -1: # Traveling south\n forward = [headX, headY - 1]\n left = [headX + 1, headY]\n right = [headX - 1, headY]\n elif headY == neckY: # Traveling horizontally\n if (headX - neckX) == 1: # Traveling east\n forward = [headX + 1, headY]\n left = [headX, headY + 1]\n right = [headX, headY - 1]\n elif (headX - neckX) == -1: # Traveling West\n forward = [headX - 1, headY]\n left = [headX, headY - 1]\n right = [headX, headY + 1]\n all_potential_locations.append(forward)\n all_potential_locations.append(left)\n all_potential_locations.append(right)\n except:\n pass\n self.all_potential_locations = all_potential_locations\n return all_potential_locations\n\n def choose_move(_World, Being):\n\n try:\n\n def vision(_World, Being):\n\n def near_vision(_World, Being):\n x = Being.head[0]\n y = Being.head[1]\n near_vision = [\n [x-2, y+2], [x-1, y+2], [x, y+2], [x+1, y+2], [x+2, y+2],\n [x-2, y+1], [x-1, y+1], [x, y+1], [x+1, y+1], [x+2, y+1],\n [x-2, y], [x-1, y], [x, y], [x+1, y], [x+2, y],\n [x-2, y-1], [x-1, y-1], [x, y-1], 
[x+1, y-1], [x+2, y-1],\n [x-2, y-2], [x-1, y-2], [x, y-2], [x+1, y-2], [x+2, y-2],\n ]\n return near_vision\n\n def straight_vision(_World, Being):\n\n def north_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x, y+1]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n y = y+1\n return north_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def south_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x, y-1]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n y = y-1\n return south_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def east_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x + 1, y]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n x = x + 1\n return east_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def west_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x-1, y]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n x = x-1\n return west_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n headX = Being.head[0]\n headY = Being.head[1]\n neckX = Being.location[1][0]\n neckY = Being.location[1][1]\n return_package = north_distance_to_object(_World, headX, headY)\n north_distance = return_package[0]\n north_energy = return_package[1]\n return_package = south_distance_to_object(_World, headX, headY)\n south_distance = return_package[0]\n south_energy = return_package[1]\n return_package = east_distance_to_object(_World, headX, headY)\n east_distance = return_package[0]\n east_energy = return_package[1]\n return_package = west_distance_to_object(_World, headX, headY)\n west_distance = return_package[0]\n west_energy = return_package[1]\n\n if headX == neckX: # Traveling vertically\n if (headY - neckY) == 1: # Traveling north\n forward = [headX, headY + 1]\n forward_distance = north_distance\n forward_energy = north_energy\n left = [headX - 1, headY]\n left_distance = west_distance\n left_energy = west_energy\n right = [headX + 1, headY]\n right_distance = east_distance\n right_energy = east_energy\n elif (headY - neckY) == -1: # Traveling south\n forward = [headX, headY - 1]\n forward_distance = south_distance\n forward_energy = south_energy\n left = [headX + 1, headY]\n left_distance = east_distance\n left_energy = east_energy\n right = [headX - 1, headY]\n right_distance = west_distance\n right_energy = west_energy\n elif headY == neckY: # Traveling horizontally\n if (headX - neckX) == 1: # Traveling east\n forward = [headX + 1, headY]\n forward_distance = east_distance\n forward_energy = east_energy\n left = [headX, headY + 1]\n left_distance = north_distance\n 
left_energy = north_energy\n right = [headX, headY - 1]\n right_distance = south_distance\n right_energy = south_energy\n elif (headX - neckX) == -1: # Traveling West\n forward = [headX - 1, headY]\n forward_distance = west_distance\n forward_energy = west_energy\n left = [headX, headY -1]\n left_distance = south_distance\n left_energy = south_energy\n right = [headX, headY + 1]\n right_distance = north_distance\n right_energy = north_energy\n\n if (forward_distance == 2) and (forward_energy == False):\n forward_danger = True\n else:\n forward_danger = False\n if (left_distance == 2) and (left_energy == False):\n left_danger = True\n else:\n left_danger = False\n if (right_distance == 2) and (right_energy == False):\n right_danger = True\n else:\n right_danger = False\n\n die_locations = []\n try:\n if (int(_World.id_world[forward[0], forward[1]]) == 1) and (forward not in Being.location):\n forward_die = 1\n die_locations.append(forward)\n else:\n forward_die = 0\n except: # Off grid\n forward_die = 1\n try:\n if (int(_World.id_world[left[0], left[1]]) == 1) and (left not in Being.location):\n left_die = 1\n die_locations.append(left)\n else:\n left_die = 0\n except: # Off grid\n left_die = 1\n try:\n if (int(_World.id_world[right[0], right[1]]) == 1) and (right not in Being.location):\n right_die = 1\n die_locations.append(right)\n else:\n right_die = 0\n except: # Off grid\n right_die = 1\n\n potential_locations = [forward, left, right]\n distances = [1 - forward_distance/100, 1 - left_distance/100, 1 - right_distance/100]\n whether_energy = [int(forward_energy), int(left_energy), int(right_energy)]\n danger = [int(forward_danger), int(left_danger), int(right_danger)]\n die = [forward_die, left_die, right_die]\n output_package = [potential_locations, distances, whether_energy, danger, die, die_locations]\n return output_package\n\n output_package = straight_vision(_World, Being)\n # near_vision = near_vision(_World, Being)\n\n return output_package\n\n def smell(_World, Being, potential_locations):\n head_location = Being.head\n headX = head_location[0]\n headY = head_location[1]\n uneaten_Energy = _World.uneaten_Energy\n closest_Energy_location = False\n for Energy in uneaten_Energy:\n EnergyX = Energy.location[0][0]\n EnergyY = Energy.location[0][1]\n # print(\"Energy Location: \" + str(EnergyX) + \", \" + str(EnergyY))\n distance = abs(headX-EnergyX) + abs(headY-EnergyY)\n if closest_Energy_location == False:\n closest_Energy_location = Energy.location[0]\n closest_distance = distance\n elif closest_distance > distance:\n closest_Energy_location = Energy.location[0]\n closest_distance = distance\n try:\n CEL_X = closest_Energy_location[0]\n CEL_Y = closest_Energy_location[1]\n closest_potential_location = False\n for option in potential_locations:\n optionX = option[0]\n optionY = option[1]\n distance = abs(CEL_X-optionX) + abs(CEL_Y-optionY)\n if closest_potential_location == False:\n closest_potential_location = option\n closest_distance = distance\n elif closest_distance > distance:\n closest_potential_location = option\n closest_distance = distance\n except:\n closest_potential_location = None\n closest_distance = 0\n return_package = [closest_potential_location, closest_distance]\n return return_package\n\n\n output_package = vision(_World, Being)\n potential_locations = output_package[0]\n distances = output_package[1]\n whether_energy = output_package[2]\n danger = output_package[3]\n die = output_package[4]\n die_locations = output_package[5]\n data_list = distances + 
whether_energy + danger + die\n return_package = smell(_World, Being, potential_locations)\n potential_location_closest_to_Energy = return_package[0]\n distance_to_closest_Energy = return_package[1]\n data_list.append(distance_to_closest_Energy)\n all_potential_locations = _World.compile_potential_locations()\n pre_processing_results = []\n\n ### OLD WAY:\n # for cell in near_vision:\n # output = _World.processing_single_cell(Being, cell, potential_locations, DNA)\n # results = output[0]\n # DNA = output[1]\n # for result in results:\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # processing_results.append(result * random.uniform(-1,1))\n\n ### NEW WAY (STILL NOT SOPHISTICATED):\n for data in data_list:\n ### Pre-Process Option 1\n pre_processing_results.append(data)\n ### Pre-Process Option 2\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # input = sight_result\n # for i in range (number_of_operations): # Implement this later\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1,1)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # input = operation(input, n)\n # pre_processing_results.append(input)\n pass\n\n ### Pre-Process Option 3\n # for i in range(5):\n # intake_neuron = []\n # for sight_result in sight_results:\n # input = sight_result\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1, 1)\n # intake_neuron.append(n * input)\n # intake_result = sum(intake_neuron)\n # pre_processing_results.append(intake_result)\n\n\n processing_results = []\n for pre_processing_result in pre_processing_results:\n ### Option 1\n processing_results.append(pre_processing_result)\n\n ### Option 2\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # input = pre_processing_result\n # for i in range (number_of_operations):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1,1)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # input = operation(input, n)\n # processing_results.append(input)\n # for i in range(3):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # input1 = random.choice(pre_processing_results)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # for i in range (number_of_operations):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # input2 = random.choice(pre_processing_results)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # try:\n # input1 = operation(input1, input2)\n # except:\n # input1 = 0\n # processing_results.append(input1)\n\n first_loop = True\n for potential_location in potential_locations:\n DNA = Being.DNA\n assessment_value = 0\n for result in processing_results:\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n assessment_value = assessment_value + (result * random.uniform(-1,1))\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n potential_location_conflict_number = (all_potential_locations.count(potential_location) - 1)\n # assessment_value = assessment_value + (potential_location_conflict_number * random.uniform(-100, 100))\n assessment_value = assessment_value + 
(potential_location_conflict_number * 100) # FOR DEBUGGING PURPOSES\n            if potential_location == potential_location_closest_to_Energy:\n                ### Option 1\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # number_of_operations = random.choice([1, 2])\n                # variable = distance_to_closest_Energy\n                # for i in range(number_of_operations): # Implement this later\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # n = random.uniform(-10, 10)\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # operation = _World.choose_math_operation(DNA)\n                # variable = operation(variable, n)\n                # random.seed(int(str(DNA)[0:8]))\n                # DNA = int(str(DNA)[2:])\n                # assessment_value = assessment_value + (variable * random.uniform(-1,1))\n                ### Option 2\n                DNA = int(str(DNA)[2:])\n                random.seed(int(str(DNA)[0:8]))\n                n = random.uniform(-10, 10)\n                assessment_value = assessment_value + (n * random.uniform(-1,1))\n            if potential_location in die_locations:\n                ### Option 1\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # number_of_operations = random.choice([1, 2])\n                # variable = 1\n                # for i in range(number_of_operations): # Implement this later\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # n = random.uniform(-10, 10)\n                # DNA = int(str(DNA)[2:])\n                # random.seed(int(str(DNA)[0:8]))\n                # operation = _World.choose_math_operation(DNA)\n                # variable = operation(variable, n)\n                # assessment_value = assessment_value + (variable * random.uniform(-10,10))\n                ### Option 2\n                DNA = int(str(DNA)[2:])\n                random.seed(int(str(DNA)[0:8]))\n                # assessment_value = assessment_value - (assessment_value * random.uniform(-100,100))\n                assessment_value = assessment_value + abs(assessment_value * 100) # FOR DEBUGGING PURPOSES\n            if first_loop == True:\n                if (potential_location[0] <= 99) and (potential_location[0] >= 0) and (potential_location[1] <= 99) and \\\n                        (potential_location[1] >= 0): # Off the grid\n                    chosen_move = potential_location\n                    first_loop = False\n                    best = assessment_value\n            else:\n                if float(assessment_value) < float(best):\n                    if (potential_location[0] <= 99) and (potential_location[0] >= 0) and (potential_location[1] <= 99) and \\\n                            (potential_location[1] >= 0): # Off the grid\n                        chosen_move = potential_location\n                        best = assessment_value  # keep the running minimum up to date so later candidates compare against it\n        return chosen_move\n\n        except Exception as e:\n            print(\"Uh oh... 
Ran into error while choosing move: \" + str(e))\n            traceback.print_exc()\n            return None\n\n    def compile_random_spawn_locations(self, spawn_count, spawning_beings=True):\n\n        def add_random_location(spawn_locations, spawning_beings=True, taken_locations=[]):\n            if spawning_beings == True:\n                random.seed()\n                random_head_location = [random.choice(range(99)), random.choice(range(99))]\n                if random_head_location not in taken_locations:\n                    x = random_head_location[0]\n                    y = random_head_location[1]\n                    potential_neck_locations = [[x, y + 1], [x - 1, y], [x + 1, y], [x, y - 1]]\n                    random_neck_location = spawn_random_neck_location(random_head_location, potential_neck_locations,\n                                                                      spawn_locations, taken_locations=taken_locations)\n                    if random_neck_location != False:\n                        random_location = [random_head_location, random_neck_location]\n                        spawn_locations.append(random_location)\n                        taken_locations.append(random_head_location)\n                        taken_locations.append(random_neck_location)\n                        return [spawn_locations, taken_locations]\n                    else:\n                        return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n                                                   taken_locations=taken_locations)\n                else:\n                    return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n                                               taken_locations=taken_locations)\n            elif spawning_beings == False:\n                random_location = [[random.choice(range(99)), random.choice(range(99))]]\n                if random_location not in taken_locations:\n                    spawn_locations.append(random_location)\n                    taken_locations.append(random_location)\n                    return [spawn_locations, taken_locations]\n                else:\n                    return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n                                               taken_locations=taken_locations)\n\n        def spawn_random_neck_location(head_location, potential_neck_locations, spawn_locations, taken_locations=[]):\n            random_neck_location = random.choice(potential_neck_locations)\n            if random_neck_location not in taken_locations:\n                return random_neck_location\n            else:\n                potential_neck_locations.remove(random_neck_location)\n                if not potential_neck_locations:\n                    return False\n                elif potential_neck_locations:\n                    return spawn_random_neck_location(head_location, potential_neck_locations, spawn_locations,\n                                                      taken_locations=taken_locations)\n\n        spawn_locations = []\n        taken_locations = []\n        for coordinates, id in numpy.ndenumerate(self.id_world):\n            if id != 0:\n                taken_locations.append(list(coordinates))  # ndenumerate yields tuples; the membership checks above compare lists\n        for i in range(spawn_count):\n            output = add_random_location(spawn_locations, spawning_beings=spawning_beings,\n                                         taken_locations=taken_locations)\n            spawn_locations = output[0]\n            taken_locations = output[1]\n        return spawn_locations\n\n    def spawn_beings(self, spawn_count):\n        locations = self.compile_random_spawn_locations(spawn_count, spawning_beings=True)\n        Beings = []\n        for location in locations:\n            spawned_Being = self.Being(self, location)\n            counter = 1\n            for coordinates in location:\n                if counter == 1:\n                    self.object_world[coordinates[0], coordinates[1]] = spawned_Being\n                    self.id_world[coordinates[0], coordinates[1]] = spawned_Being.head_id\n                    counter = counter + 1\n                else:\n                    self.object_world[coordinates[0], coordinates[1]] = spawned_Being.body_id\n                    self.id_world[coordinates[0], coordinates[1]] = spawned_Being.body_id\n            Beings.append(spawned_Being)\n        return(Beings)\n\n    def spawn_energy(self, spawn_count, energy_count=1):\n        locations = self.compile_random_spawn_locations(spawn_count, spawning_beings=False)\n        uneaten_Energy = []\n        for location in locations:\n            spawned_Energy = self.Energy(location, energy_count=energy_count)\n            for coordinates in location:\n                self.object_world[coordinates[0], 
coordinates[1]] = spawned_Energy\n                self.id_world[coordinates[0], coordinates[1]] = spawned_Energy.energy_id\n            uneaten_Energy.append(spawned_Energy)\n        return(uneaten_Energy)\n\n    # class universal_laws():\n    #     def __init__(self, location):\n\n    # class chromosomes():\n    #     def __init__(self, location):\n\n\n\ndef operate(a, b, operation):\n    # Apply a binary operation, falling back to unary, then to False.\n    try:\n        return operation(a, b)\n    except Exception:\n        try:\n            return operation(a)\n        except Exception:\n            return False\n\nif __name__ == \"__main__\":\n    # def main_run():\n    world = World()\n    plt.imshow(world.id_world)\n    plt.clim(0, 30) # colorbar will be based on min value of 0 and max value of 30\n    plt.colorbar()\n    plt.pause(0.01)\n    while True:\n    # for i in range(3):\n        # if len(world.all_Beings) < 25:\n        #     print(\"Spawning 25 new Beings\")\n        #     world.all_Beings = world.all_Beings + world.spawn_beings(25)\n        world.update_World()\n        plt.clf()\n        total_energy = sum([Energy.energy_count for Energy in world.uneaten_Energy] + \\\n                           [len(Being.location) for Being in world.all_Beings] + \\\n                           [Being.energy for Being in world.all_Beings])\n        plt.title(\"World Age: \" + str(world.World_age) + \"\\nTotal Energy: \" + str(total_energy) + \\\n                  \"\\nEmperor Length: \" + str(world.Emperor_length) + \"\\n King Length: \" + str(world.King_length))\n        plt.imshow(world.id_world)\n        plt.colorbar()\n        plt.pause(0.01)\n        print(\"Loop \" + str(world.World_age) + \" complete. Emperor Length = \" + str(world.Emperor_length) + \\\n              \" Emperor_DNA = \" + str(world.Emperor_DNA) + \" + King_DNA = \" + str(world.King_DNA) + \\\n              \" Queen_DNA = \" + str(world.Queen_DNA))\n\n\n    # import cProfile\n    # pr = cProfile.Profile()\n    # pr.enable()\n    # main_run()\n    # pr.disable()\n    # pr.print_stats(sort='time')\n\n\n", "repo_name": "brokeharvard/FreeWill", "sub_path": "slither.py", "file_name": "slither.py", "file_ext": "py", "file_size_in_byte": 49129, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "platform.system", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 61, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 62, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 63, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 68, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 69, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 70, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 98, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 99, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 170, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 173, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.ndenumerate", "line_number": 284, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 309, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 310, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 311, "usage_type": "attribute"}, {"api_name": 
"operator.truediv", "line_number": 312, "usage_type": "attribute"}, {"api_name": "operator.floordiv", "line_number": 313, "usage_type": "attribute"}, {"api_name": "operator.mod", "line_number": 315, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 324, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 325, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 734, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 735, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 737, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 760, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 761, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 762, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 780, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 798, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 805, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 806, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 826, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 836, "usage_type": "call"}, {"api_name": "numpy.ndenumerate", "line_number": 849, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 907, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 907, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clim", "line_number": 908, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 908, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 909, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 909, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 910, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 910, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 917, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 917, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 921, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 921, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 923, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 923, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 924, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 924, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 925, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 925, "usage_type": "name"}]} +{"seq_id": "19738102612", "text": "from crispy_forms.helper import FormHelper\r\nfrom crispy_forms.layout import Submit\r\nfrom django import forms\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom issues import models\r\nfrom issues.models import ProposalType\r\n\r\n\r\nclass BaseIssueForm(forms.ModelForm):\r\n class Meta:\r\n model = models.Issue\r\n fields = (\r\n 'title',\r\n 'abstract',\r\n )\r\n\r\n\r\nclass CreateIssueForm(BaseIssueForm):\r\n\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n self.helper.form_tag = False\r\n\r\n super(CreateIssueForm, self).__init__(*args, **kwargs)\r\n\r\n initial = {'type': ProposalType.ADMIN}\r\n\r\n self.new_proposal = CreateProposalBaseForm(prefix='proposal',\r\n data=self.data if 
self.is_bound else None,\r\n initial=initial)\r\n self.new_proposal.fields['type'].required = False\r\n\r\n def is_valid(self):\r\n valid = super(CreateIssueForm, self).is_valid()\r\n if self.data.get('proposal-type') == '':\r\n return valid\r\n return self.new_proposal.is_valid() and valid\r\n\r\n def save(self, commit=True):\r\n o = super(CreateIssueForm, self).save(commit)\r\n if self.data.get('proposal-type') != '':\r\n self.new_proposal.instance.issue = o\r\n self.new_proposal.instance.created_by = o.created_by\r\n self.new_proposal.save()\r\n return o\r\n\r\n\r\nclass UpdateIssueForm(BaseIssueForm):\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n\r\n self.helper.add_input(Submit('submit', _('Update')))\r\n\r\n super(UpdateIssueForm, self).__init__(*args, **kwargs)\r\n self.helper.form_tag = True\r\n\r\n\r\nclass CreateProposalBaseForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = models.Proposal\r\n fields = (\r\n 'type',\r\n 'title',\r\n 'content',\r\n 'assigned_to',\r\n 'due_by',\r\n )\r\n\r\n\r\nclass CreateProposalForm(CreateProposalBaseForm):\r\n\r\n submit_button_text = _('Create')\r\n\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n\r\n self.helper.add_input(Submit('submit', self.submit_button_text))\r\n\r\n super(CreateProposalForm, self).__init__(*args, **kwargs)\r\n\r\n\r\nclass EditProposalForm(CreateProposalForm):\r\n submit_button_text = _('Save')\r\n\r\n\r\nclass EditProposalTaskForm(EditProposalForm):\r\n\r\n class Meta:\r\n model = models.Proposal\r\n fields = (\r\n 'assigned_to',\r\n 'due_by',\r\n )\r\n\r\n\r\nclass CreateIssueCommentForm(forms.ModelForm):\r\n\r\n submit_label = _('Add')\r\n form_id = \"add-comment\"\r\n\r\n class Meta:\r\n model = models.IssueComment\r\n fields = (\r\n 'content',\r\n )\r\n\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n if self.form_id:\r\n self.helper.form_id = self.form_id\r\n\r\n self.helper.add_input(Submit('submit', self.submit_label))\r\n\r\n super(CreateIssueCommentForm, self).__init__(*args, **kwargs)\r\n\r\n\r\nclass EditIssueCommentForm(CreateIssueCommentForm):\r\n\r\n submit_label = _('Save')\r\n form_id = None\r\n", "repo_name": "itamaro/OpenCommunity", "sub_path": "src/issues/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "issues.models.Issue", "line_number": 11, "usage_type": "attribute"}, {"api_name": "issues.models", "line_number": 11, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 21, "usage_type": "call"}, {"api_name": "issues.models.ProposalType.ADMIN", "line_number": 26, "usage_type": "attribute"}, {"api_name": "issues.models.ProposalType", "line_number": 26, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 50, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 52, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 52, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 58, "usage_type": "name"}, {"api_name": "issues.models.Proposal", "line_number": 61, "usage_type": "attribute"}, 
{"api_name": "issues.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 73, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 76, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 78, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 84, "usage_type": "call"}, {"api_name": "issues.models.Proposal", "line_number": 90, "usage_type": "attribute"}, {"api_name": "issues.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 97, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 99, "usage_type": "call"}, {"api_name": "issues.models.IssueComment", "line_number": 103, "usage_type": "attribute"}, {"api_name": "issues.models", "line_number": 103, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 109, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 113, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "25181384805", "text": "import copy\nimport datetime\nimport decimal\nimport json\nimport uuid\nimport pytest\nfrom boto3.dynamodb.types import TypeSerializer\nfrom botocore import stub\nfrom fixtures import context, lambda_module # pylint: disable=import-error\nfrom helpers import compare_event # pylint: disable=import-error,no-name-in-module\n\n\nlambda_module = pytest.fixture(scope=\"module\", params=[{\n \"function_dir\": \"table_update\",\n \"module_name\": \"main\",\n \"environ\": {\n \"ENVIRONMENT\": \"test\",\n \"EVENT_BUS_NAME\": \"EVENT_BUS_NAME\",\n \"POWERTOOLS_TRACE_DISABLED\": \"true\"\n }\n}])(lambda_module)\ncontext = pytest.fixture(context)\n\n\n@pytest.fixture\ndef order():\n now = datetime.datetime.now()\n\n return {\n \"orderId\": str(uuid.uuid4()),\n \"userId\": str(uuid.uuid4()),\n \"createdDate\": now.isoformat(),\n \"modifiedDate\": now.isoformat(),\n \"status\": \"NEW\",\n \"products\": [{\n \"productId\": str(uuid.uuid4()),\n \"name\": \"Test Product\",\n \"package\": {\n \"width\": 1000,\n \"length\": 900,\n \"height\": 800,\n \"weight\": 700\n },\n \"price\": 300,\n \"quantity\": 4\n }],\n \"address\": {\n \"name\": \"John Doe\",\n \"companyName\": \"Company Inc.\",\n \"streetAddress\": \"123 Street St\",\n \"postCode\": \"12345\",\n \"city\": \"Town\",\n \"state\": \"State\",\n \"country\": \"SE\",\n \"phoneNumber\": \"+123456789\"\n },\n \"deliveryPrice\": 200,\n \"total\": 1400\n }\n\n\n@pytest.fixture\ndef insert_data(order):\n record = {\n \"awsRegion\": \"us-east-1\",\n \"dynamodb\": {\n \"Keys\": {\n \"orderId\": {\"S\": order[\"orderId\"]}\n },\n \"NewImage\": {k: TypeSerializer().serialize(v) for k, v in order.items()},\n \"SequenceNumber\": \"1234567890123456789012345\",\n \"SizeBytes\": 123,\n \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n },\n \"eventID\": str(uuid.uuid4()),\n \"eventName\": \"INSERT\",\n \"eventSource\": \"aws:dynamodb\",\n \"eventVersion\": \"1.0\"\n }\n event = {\n \"Source\": \"ecommerce.orders\",\n \"Resources\": [order[\"orderId\"]],\n \"DetailType\": \"OrderCreated\",\n \"Detail\": json.dumps(order),\n \"EventBusName\": \"EVENT_BUS_NAME\"\n }\n\n return {\"record\": record, \"event\": event}\n\n\n@pytest.fixture\ndef remove_data(order):\n record = {\n 
\"awsRegion\": \"us-east-1\",\n \"dynamodb\": {\n \"Keys\": {\n \"orderId\": {\"S\": order[\"orderId\"]}\n },\n \"OldImage\": {k: TypeSerializer().serialize(v) for k, v in order.items()},\n \"SequenceNumber\": \"1234567890123456789012345\",\n \"SizeBytes\": 123,\n \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n },\n \"eventID\": str(uuid.uuid4()),\n \"eventName\": \"REMOVE\",\n \"eventSource\": \"aws:dynamodb\",\n \"eventVersion\": \"1.0\"\n }\n event = {\n \"Source\": \"ecommerce.orders\",\n \"Resources\": [order[\"orderId\"]],\n \"DetailType\": \"OrderDeleted\",\n \"Detail\": json.dumps(order),\n \"EventBusName\": \"EVENT_BUS_NAME\"\n }\n\n return {\"record\": record, \"event\": event}\n\n\n@pytest.fixture\ndef modify_data(order):\n new_order = copy.deepcopy(order)\n new_order[\"status\"] = \"COMPLETED\"\n\n record = {\n \"awsRegion\": \"us-east-1\",\n \"dynamodb\": {\n \"Keys\": {\n \"orderId\": {\"S\": order[\"orderId\"]}\n },\n \"OldImage\": {k: TypeSerializer().serialize(v) for k, v in order.items()},\n \"NewImage\": {k: TypeSerializer().serialize(v) for k, v in new_order.items()},\n \"SequenceNumber\": \"1234567890123456789012345\",\n \"SizeBytes\": 123,\n \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n },\n \"eventID\": str(uuid.uuid4()),\n \"eventName\": \"REMOVE\",\n \"eventSource\": \"aws:dynamodb\",\n \"eventVersion\": \"1.0\"\n }\n event = {\n \"Source\": \"ecommerce.orders\",\n \"Resources\": [order[\"orderId\"]],\n \"DetailType\": \"OrderDeleted\",\n \"Detail\": json.dumps({\n \"old\": order,\n \"new\": new_order,\n \"changed\": [\"status\"]\n }),\n \"EventBusName\": \"EVENT_BUS_NAME\"\n }\n\n return {\"record\": record, \"event\": event}\n\n\ndef test_send_events(lambda_module, insert_data):\n \"\"\"\n Test send_events()\n \"\"\"\n\n eventbridge = stub.Stubber(lambda_module.eventbridge)\n\n events = [insert_data[\"event\"]]\n response = {}\n expected_params = {\"Entries\": events}\n\n eventbridge.add_response(\"put_events\", response, expected_params)\n eventbridge.activate()\n\n lambda_module.send_events(events)\n\n eventbridge.assert_no_pending_responses()\n eventbridge.deactivate()\n\n\ndef test_handler(lambda_module, context, insert_data):\n \"\"\"\n Test the Lambda function handler\n \"\"\"\n\n # Prepare Lambda event and context\n event = {\"Records\": [insert_data[\"record\"]]}\n\n # Stubbing boto3\n eventbridge = stub.Stubber(lambda_module.eventbridge)\n # Ignore time\n insert_data[\"event\"][\"Time\"] = stub.ANY\n expected_params = {\"Entries\": [insert_data[\"event\"]]}\n eventbridge.add_response(\"put_events\", {}, expected_params)\n eventbridge.activate()\n\n # Send request\n lambda_module.handler(event, context)\n\n # Check that events were sent\n eventbridge.assert_no_pending_responses()\n eventbridge.deactivate()", "repo_name": "aws-samples/aws-serverless-ecommerce-platform", "sub_path": "orders/tests/unit/test_table_update.py", "file_name": "test_table_update.py", "file_ext": "py", "file_size_in_byte": 5506, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1015, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fixtures.lambda_module", "line_number": 13, "usage_type": "name"}, {"api_name": "fixtures.lambda_module", "line_number": 21, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}, {"api_name": "fixtures.context", "line_number": 22, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 30, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 31, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 25, "usage_type": "attribute"}, {"api_name": "boto3.dynamodb.types.TypeSerializer", "line_number": 70, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 75, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 62, "usage_type": "attribute"}, {"api_name": "boto3.dynamodb.types.TypeSerializer", "line_number": 99, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 104, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 113, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 122, "usage_type": "call"}, {"api_name": "boto3.dynamodb.types.TypeSerializer", "line_number": 131, "usage_type": "call"}, {"api_name": "boto3.dynamodb.types.TypeSerializer", "line_number": 132, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 137, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 120, "usage_type": "attribute"}, {"api_name": "botocore.stub.Stubber", "line_number": 162, "usage_type": "call"}, {"api_name": "botocore.stub", "line_number": 162, "usage_type": "name"}, {"api_name": "fixtures.lambda_module.eventbridge", "line_number": 162, "usage_type": "attribute"}, {"api_name": "fixtures.lambda_module", "line_number": 162, "usage_type": "name"}, {"api_name": "fixtures.lambda_module.send_events", "line_number": 171, "usage_type": "call"}, {"api_name": "fixtures.lambda_module", "line_number": 171, "usage_type": "name"}, {"api_name": "botocore.stub.Stubber", "line_number": 186, "usage_type": "call"}, {"api_name": "botocore.stub", "line_number": 186, "usage_type": "name"}, {"api_name": "fixtures.lambda_module.eventbridge", "line_number": 186, "usage_type": "attribute"}, {"api_name": "fixtures.lambda_module", "line_number": 186, "usage_type": "name"}, {"api_name": "botocore.stub.ANY", "line_number": 188, "usage_type": "attribute"}, {"api_name": "botocore.stub", "line_number": 188, "usage_type": "name"}, {"api_name": "fixtures.lambda_module.handler", "line_number": 194, "usage_type": "call"}, {"api_name": "fixtures.context", "line_number": 194, "usage_type": "argument"}, {"api_name": "fixtures.lambda_module", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "34521909397", "text": "import cv2 as cv\r\nimport numpy as np\r\n\r\n# Load the image\r\nimage = cv.imread(r\"E:\\cv lab\\bike.jpg\", cv.IMREAD_GRAYSCALE)\r\n\r\n# Apply Gaussian blur to reduce noise\r\nimg_gauss = cv.GaussianBlur(image, (3, 3), 0)\r\n\r\n# Define Prewitt kernels\r\nkernel_x = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\r\nkernel_y = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])\r\n\r\n# Calculate Prewitt gradients\r\nimg_prewittx = cv.filter2D(img_gauss, cv.CV_64F, kernel_x)\r\nimg_prewitty = cv.filter2D(img_gauss, cv.CV_64F, kernel_y)\r\n\r\n# Calculate the gradient magnitude\r\nimg_prewitt = cv.magnitude(img_prewittx, img_prewitty)\r\n\r\n# Normalize the gradient magnitude to the range [0, 255]\r\nimg_prewitt = cv.normalize(img_prewitt, None, 0, 255, 
cv.NORM_MINMAX, cv.CV_8U)\r\n\r\n# Display the original and Prewitt edge detection results\r\ncv.imshow(\"Original Image\", image)\r\ncv.imshow(\"Prewitt Edge Detection\", img_prewitt)\r\n\r\n# Wait for a key press and then close the windows\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()\r\n", "repo_name": "barathkumar-p/ITA0501-COMPUTER-VISION", "sub_path": "prewitt.py", "file_name": "prewitt.py", "file_ext": "py", "file_size_in_byte": 982, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.filter2D", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.magnitude", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.CV_8U", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "26961863520", "text": "import time\nimport os\nimport requests\nimport math\nimport shutil\n\n##SETTINGS##\nvertion = \"1.0.0\"\nauther = \"Mr. 
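
For contrast with the hand-rolled Prewitt kernels in the OpenCV record that closes above, the same blur, gradient, and normalize pipeline can use the built-in Sobel operator; only the two gradient calls change. The image path is a placeholder:

```python
import cv2 as cv

image = cv.imread("bike.jpg", cv.IMREAD_GRAYSCALE)  # placeholder path
img_gauss = cv.GaussianBlur(image, (3, 3), 0)

# Sobel weights the centre row/column, unlike Prewitt's uniform kernels.
sobel_x = cv.Sobel(img_gauss, cv.CV_64F, 1, 0, ksize=3)
sobel_y = cv.Sobel(img_gauss, cv.CV_64F, 0, 1, ksize=3)

img_sobel = cv.normalize(cv.magnitude(sobel_x, sobel_y),
                         None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)

cv.imshow("Sobel Edge Detection", img_sobel)
cv.waitKey(0)
cv.destroyAllWindows()
```
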
GIitch\"\ndirectory = \"GameHub\"\nstandartFolder = \"C:/Program Files (x86)/\"\nos.system('TITLE Quiz version ' + vertion) #Renames the windown titel\n\n##FUNTIONS##\ndef locationfordownload(location):\n if location == \"\":\n locationfordownload == print(\"Downloading to \" + standartFolder)\n elif location != \"\":\n locationfordownload == print(\"Downloading to \" + location)\n\ndef progress_bar(progress, total):\n precent = 100 * (progress / float(total))\n bar = '█' * int(precent) + '-' * (100 - int(precent))\n print(f\"\\r|{bar}| {precent:.2f}%\", end=\"\\r\")\n\n\nprint(\"Default folder is \" + standartFolder)\nprint(\"Press enter to select Default folder\")\nlocation = input(\"Paste the path of the installation folder: \")\nlocationfordownload(location)\n\ninstallation = requests.get(\"http://localhost/installer/vertion.txt\")\nprint(installation)\ntime.sleep(2)\n", "repo_name": "gamehub-project/GameHub-Installer", "sub_path": "py-installer/installer.py", "file_name": "installer.py", "file_ext": "py", "file_size_in_byte": 973, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.system", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "33918693036", "text": "from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom time import sleep\nfrom score import Scoreboard\n\nscreen = Screen()\n\nscreen.bgcolor(\"black\")\nscreen.setup(width=800,height=600)\nscreen.title(\"Pong\")\nscreen.tracer(0) # turn off the animation\n\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\n\nball = Ball()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(r_paddle.go_up, \"Up\")\nscreen.onkey(r_paddle.go_down, \"Down\")\n\nscreen.onkey(l_paddle.go_up, \"w\")\nscreen.onkey(l_paddle.go_down, \"s\")\n\ngame_over = False\nwhile not game_over:\n sleep(ball.move_speed)\n screen.update()\n ball.move()\n # ball's collision with top or bottom wall\n if abs(ball.ycor()) > 280:\n ball.bounce_y()\n \n # detect collision with r paddle\n if ball.distance(r_paddle) < 50 and ball.xcor() >330:\n ball.bounce_x()\n \n if ball.distance(l_paddle) < 50 and ball.xcor() <-330:\n ball.bounce_x()\n \n # detext r paddle misses ball\n if ball.xcor() > 380:\n ball.reset_position()\n scoreboard.add_lscore()\n scoreboard.update_score()\n \n # detext l paddle misses ball\n if ball.xcor() < -380:\n ball.reset_position()\n scoreboard.add_rscore()\n scoreboard.update_score()\n\nscreen.exitonclick()", "repo_name": "primepatel/pypong", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1294, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "turtle.Screen", "line_number": 7, "usage_type": "call"}, {"api_name": "paddle.Paddle", "line_number": 14, "usage_type": "call"}, {"api_name": "paddle.Paddle", "line_number": 15, "usage_type": "call"}, {"api_name": "ball.Ball", "line_number": 17, "usage_type": "call"}, {"api_name": "score.Scoreboard", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "ball.move_speed", "line_number": 29, "usage_type": "attribute"}, {"api_name": "ball.move", "line_number": 31, "usage_type": "call"}, {"api_name": "ball.ycor", "line_number": 33, "usage_type": "call"}, 
{"api_name": "ball.bounce_y", "line_number": 34, "usage_type": "call"}, {"api_name": "ball.distance", "line_number": 37, "usage_type": "call"}, {"api_name": "ball.xcor", "line_number": 37, "usage_type": "call"}, {"api_name": "ball.bounce_x", "line_number": 38, "usage_type": "call"}, {"api_name": "ball.distance", "line_number": 40, "usage_type": "call"}, {"api_name": "ball.xcor", "line_number": 40, "usage_type": "call"}, {"api_name": "ball.bounce_x", "line_number": 41, "usage_type": "call"}, {"api_name": "ball.xcor", "line_number": 44, "usage_type": "call"}, {"api_name": "ball.reset_position", "line_number": 45, "usage_type": "call"}, {"api_name": "ball.xcor", "line_number": 50, "usage_type": "call"}, {"api_name": "ball.reset_position", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "19357943681", "text": "from __future__ import print_function, division\n\nimport numpy as np\nimport time\nimport random\nfrom collections import Counter\n\nimport torch\nfrom torch_geometric.data import Data, DataLoader\n\nclass MoleculeDataset():\n def __init__(self, args):\n data_file = args.data_file\n data_dict = np.load(data_file, allow_pickle=True)\n\n self.args = args\n\n self.data_list = []\n x_list = data_dict['x']\n y_list = data_dict['y']\n edge_index_list = data_dict['edge_index']\n for x, y, edge_index in zip(x_list, y_list, edge_index_list):\n x = torch.from_numpy(np.array(x)).float()\n y = torch.from_numpy(np.array(y)).float()\n edge_index = torch.from_numpy(np.array(edge_index)).long().t().contiguous()\n self.data_list.append(Data(x=x, y=y, edge_index=edge_index))\n\n self.split_list = data_dict['split']\n\n self.args.num_node_features = self.data_list[0].num_node_features\n\n self.preprocess()\n\n def load_data(self, partition):\n if partition == 'train':\n idx_list = self.train_idx_list\n elif partition == 'val':\n idx_list = self.val_idx_list\n elif partition == 'test':\n idx_list = self.test_idx_list\n\n data_list = []\n for idx in idx_list:\n data_list.append(self.data_list[idx])\n\n return data_list\n\n def preprocess(self):\n n_split = len(self.split_list)\n self.train_idx_list = []\n self.val_idx_list = []\n self.test_idx_list = []\n\n fold_idx_list = list(range(n_split))\n test_fold_idx = self.args.fold_idx\n val_fold_idx = (test_fold_idx + 1) % n_split\n\n fold_idx_list.remove(test_fold_idx)\n fold_idx_list.remove(val_fold_idx)\n train_fold_idx = fold_idx_list\n\n self.test_idx_list.extend(list(self.split_list[test_fold_idx]))\n self.val_idx_list.extend(list(self.split_list[val_fold_idx]))\n for i in train_fold_idx:\n self.train_idx_list.extend(list(self.split_list[i]))\n\n assert len(set(self.train_idx_list) & set(self.val_idx_list)) == 0\n assert len(set(self.val_idx_list) & set(self.test_idx_list)) == 0\n assert len(set(self.train_idx_list) & set(self.test_idx_list)) == 0\n \n", "repo_name": "QHwan/MOLGNN_BASELINE", "sub_path": "batch.py", "file_name": "batch.py", "file_ext": "py", "file_size_in_byte": 2294, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 25, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "torch_geometric.data.Data", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "9104852599", "text": "import numpy as np\nimport torch\nfrom model import DynamicsModel\n\n\n# transition model: s'-s = f(s,a) + noise\n\nclass DataSet:\n def __init__(self, X, t, w):\n self.X = X\n self.t = t\n self.w = w\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n return self.X[index], self.t[index], self.w[index]\n\n\nclass DynamicsRegression():\n def __init__(self, input_data, output_data, hidden_unit_num, ob_dim=None, B_dash=1.):\n\n self.B_dash=B_dash\n self.hidden_unit_num=hidden_unit_num\n\n self.input_data = torch.from_numpy(input_data.astype(np.float32)).clone()\n self.output_data = torch.from_numpy(output_data.astype(np.float32)).clone()\n self.data_num = input_data.shape[0]\n self.input_dim = input_data.shape[1]\n self.output_dim = output_data.shape[1]\n\n if ob_dim is None:\n self.ob_dim = output_data.shape[1]\n\n self.model = DynamicsModel(self.input_dim, self.output_dim, self.hidden_unit_num)\n\n self.data_weight = torch.ones(self.data_num,1)\n #self.logvar = torch.nn.Parameter(torch.sum(self.output_data**2, dim=0)/self.input_data.shape[0])\n\n def save_model(self, filename='temp_mle_dynamics_model'):\n torch.save(self.model, filename+'_param.pt')\n np.savetxt(filename+'_iw.csv', self.data_weight.numpy(),delimiter=',')\n \n\n def load_model(self, filename='temp_mle_dynamics_model'):\n self.model = torch.load(filename+'_param.pt')\n self.data_weight = torch.from_numpy(np.loadtxt(filename+'_iw.csv',delimiter=',').astype(np.float32)).clone().reshape(self.data_num,1)\n print(\"load \",filename)\n\n def get_data_weight(self):\n return self.data_weight.numpy()\n\n def load_data_weight(self, data_weight):\n print(\"[reg] load weight\")\n self.data_weight = torch.tensor(data_weight.astype(np.float32).reshape(data_weight.shape[0],1))\n\n\n def train_model(self, num_iter=10000,#2000, \n batch_size=32, \n lr=1e-3, weight_decay=0.0001, \n param_update_penalty=0.0,\n holdout_ratio=0.0, \n grad_vector=None):\n #self.model = DynamicsModel(self.input_dim, self.output_dim, self.hidden_unit_num)\n init_param=[]\n for p in self.model.parameters():\n init_param.append(torch.flatten(p.data.clone()))\n print(\"p.data.shape\",p)\n init_param = torch.cat(init_param,dim=0)\n print(\"init_param.shape\",init_param.shape)\n\n print(\"[reg] learning rate\",lr)\n print(\"[reg] weight decay\",weight_decay)\n print(\"[reg] penalty\",param_update_penalty) \n optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n\n\n data_index = np.arange(self.data_num)\n np.random.shuffle(data_index)\n\n train_input_data = self.input_data[data_index[int(holdout_ratio*self.data_num):]]\n train_target_data = self.output_data[data_index[int(holdout_ratio*self.data_num):]]\n train_data_weight = self.data_weight[data_index[int(holdout_ratio*self.data_num):]]\n\n if holdout_ratio>0.001:\n valid_input_data = self.input_data[data_index[:int(holdout_ratio*self.data_num)]]\n valid_data_weight = self.data_weight[data_index[:int(holdout_ratio*self.data_num)]]\n valid_target_data = self.output_data[data_index[:int(holdout_ratio*self.data_num)]]\n\n train_dataset2 = DataSet(train_input_data, train_target_data, train_data_weight)\n temp_loader = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, shuffle=True, drop_last=True)\n\n self.grad_vector=grad_vector\n print(\"grad_vector\",grad_vector)\n\n 
best_loss = 1.e12\n update_num = 0\n for epoch in range(num_iter):\n\n train_loss = 0.\n for data in temp_loader:\n X = data[0]\n t = data[1]\n w = data[2]\n optimizer.zero_grad()\n mu, logvar = self.model(X)\n inv_var = torch.exp(-logvar)\n loss = 0.5 * ( (inv_var*((mu-t)**2) + logvar) * w ).sum() / X.shape[0] # averaged per data\n\n temp_param=[]\n for p in self.model.parameters():\n temp_param.append(torch.flatten(p.data))\n\n temp_param=torch.cat(temp_param,dim=0)\n if self.grad_vector is not None:\n loss += torch.sum((temp_param-init_param)*self.grad_vector) # averaged per data\n #loss += param_update_penalty*torch.sum((self.grad_vector*(init_param-temp_param))**2)\n loss += param_update_penalty*( (init_param-temp_param)**2 ).sum()\n loss.backward()\n optimizer.step()\n train_loss += loss.item() * X.shape[0]\n\n if holdout_ratio>0.001:\n with torch.no_grad():\n mu, logvar = self.model(valid_input_data)\n inv_var = torch.exp(-logvar)\n valid_loss = 0.5*( (inv_var*((mu-valid_target_data)**2) + logvar) * valid_data_weight ).sum() / valid_input_data.shape[0]\n temp_param=[]\n for p in self.model.parameters():\n temp_param.append(torch.flatten(p.data))\n temp_param=torch.cat(temp_param,dim=0)\n if self.grad_vector is not None:\n valid_loss += torch.sum((temp_param-init_param)*self.grad_vector) # averaged per data\n valid_loss += param_update_penalty*( (temp_param-init_param)**2 ).sum()\n if best_loss>valid_loss:\n update_num = 0\n best_loss = valid_loss\n else:\n update_num += 1\n\n print(\"epoch, valid_loss, update_num\", epoch, valid_loss, update_num)\n else:\n if best_loss>train_loss:\n update_num = 0\n best_loss = train_loss\n else:\n update_num += 1\n print(\"epoch, train_loss, update_num\", epoch, train_loss, update_num)\n\n if update_num>20:\n break\n\n\n\n temp_param=[]\n for p in self.model.parameters():\n temp_param.append(torch.flatten(p.data))\n print(p.data)\n temp_param=torch.cat(temp_param,dim=0)\n print(\"parameter_diff\",torch.sum((init_param-temp_param)**2))\n\n with torch.no_grad():\n print(\"var\",torch.exp(self.model.logvar))\n\n with torch.no_grad():\n mu, logvar = self.model(self.input_data)\n inv_var = torch.exp(-logvar) \n unweighted_nll = 0.5 * (inv_var*((mu-self.output_data)**2) + logvar ).sum(-1)\n unweighted_nll += 0.5 * np.log(2.*np.pi) * self.ob_dim\n np.savetxt('temp_unweighted_nll.csv', unweighted_nll.numpy()-np.min(unweighted_nll.numpy()), delimiter=',')\n\n\n def loss(self):\n X = self.input_data\n t = self.output_data\n w = self.data_weight\n with torch.no_grad():\n mu, logvar = self.model(X)\n inv_var = torch.exp(-logvar) \n loss = 0.5 * ( (inv_var*((mu-t)**2) + logvar) * w ).sum() / X.shape[0] \n loss += 0.5 * np.log(2.*np.pi) * self.ob_dim\n return loss.numpy()\n\n\n def sim_next_ob(self, ob, ac):\n\n obac = np.concatenate((ob,ac),axis=0)\n with torch.no_grad():\n pred, logvar = self.model( torch.from_numpy(obac.astype(np.float32)).clone() )\n noise = torch.randn(self.output_dim) * torch.exp(0.5*logvar)\n y = pred + noise\n return ob + y.numpy()\n\n def get_b_hat(self):\n with torch.no_grad():\n self.noise_weight = torch.exp(self.model.state_dict()[\"logvar\"])\n mu, _ = self.model(self.input_data)\n unweighted_nll = 0.5* (torch.unsqueeze(torch.sum((1./self.noise_weight)*((mu-self.output_data)**2),dim=1),1)+torch.sum(torch.log(2.*np.pi*self.noise_weight)))\n return self.B_dash*0.5/np.sqrt(self.loss() - np.min(unweighted_nll.numpy()))\n\n #def loss2(self, reward_fn, gamma):\n # with torch.no_grad():\n # self.noise_weight = 
torch.exp(self.model.state_dict()[\"logvar\"])\n # mu, _ = self.model(self.input_data)\n # unweighted_nll = 0.5* (torch.unsqueeze(torch.sum((1./self.noise_weight)*((mu-self.output_data)**2),dim=1),1)+torch.sum(torch.log(2.*np.pi*self.noise_weight)))\n # temp = self.B_dash*torch.sqrt( self.loss() - np.min(unweighted_nll.numpy()) )\n # for i in range(self.data_num):\n # temp -= (self.data_weight[i,0]/self.data_num) * reward_fn(self.input_data[i]) / (1.-gamma)\n # return temp[0]\n \n\nif __name__ == '__main__':\n obac_data = np.loadtxt('np_obac.csv',delimiter=',')\n nextob_data = np.loadtxt('np_diff_ob.csv',delimiter=',')\n\n test_model = DynamicsRegression(obac_data, nextob_data, 8)\n\n #weight = np.loadtxt('weight.csv',delimiter=',')\n #test_model.load_data_weight(weight)\n #test_model.load_model()\n\n test_model.train_model()\n\n test_model.save_model()\n\n \n", "repo_name": "numahha/wmopo", "sub_path": "pendulum_experiments/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 9245, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.from_numpy", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "model.DynamicsModel", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.flatten", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 158, "usage_type": 
"call"}, {"api_name": "torch.cat", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 190, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "44049394565", "text": "\"\"\"The :mod:`feature_extractor` contains a FeatureExtractor for audio files.\"\"\"\n\n# Authors: Peter Steiner ,\n# License: BSD 3 clause\n\nfrom __future__ import annotations\n\nfrom typing import Union, Callable, Dict, Optional\n\nimport numpy as np\nfrom sklearn.preprocessing import FunctionTransformer\n\n\nclass FeatureExtractor(FunctionTransformer):\n \"\"\"\n Construct a transformer from an arbitrary callable.\n\n A FunctionTransformer forwards its X (and optionally y) arguments to a\n user-defined function or function object and returns the result of this\n function.\n This is useful for stateless transformations such as taking the log of\n frequencies, doing custom scaling, etc.\n\n Compared to sklearn.preprocessing.FunctionTransformer, it is possible to\n pass a filename as X and process the underlying file.\n\n Note: If a lambda is used as the function, then the resulting transformer\n will not be pickleable.\n\n Parameters\n ----------\n func : Union[Callable, None]\n The callable to use for the transformation.\n This will be passed the same arguments as transform,\n with args and kwargs forwarded.\n If func is None, then func will be the identity function.\n kw_args : Union[Dict, None], default=None.\n Dictionary of additional keyword arguments to pass to func.\n\n \"\"\"\n\n def __init__(self, func: Union[Callable, None],\n 
kw_args: Union[Dict, None] = None):\n \"\"\"Construct the FeatureExtractor.\"\"\"\n super().__init__(func=func, inverse_func=None, validate=False,\n accept_sparse=False, check_inverse=False,\n kw_args=kw_args, inv_kw_args=None)\n\n def fit(self, X: Union[str, np.ndarray], y: Optional[np.ndarray] = None)\\\n -> FeatureExtractor:\n \"\"\"\n Fit transformer by checking X.\n\n Parameters\n ----------\n X : Union[str, np.ndarray]\n Input that can either be a feature matrix or a filename.\n y : Optional[np.ndarray, None], default=None\n Target values (None for unsupervised transformations).\n \"\"\"\n super().fit(X=X, y=y)\n return self\n\n def transform(self, X: Union[str, np.ndarray]) -> np.ndarray:\n \"\"\"\n Transform X using the forward function.\n\n Parameters\n ----------\n X : Union[str, np.ndarray]\n Input that can either be a feature matrix or a filename.\n\n Returns\n -------\n X_out : array-like, shape (n_samples, n_features)\n Transformed input.\n\n \"\"\"\n X_out = self._transform(X=X, func=self.func, kw_args=self.kw_args)\n if type(X_out) is tuple:\n X_out = X_out[0]\n return X_out\n", "repo_name": "TUD-STKS/PyRCN", "sub_path": "src/pyrcn/util/_feature_extractor.py", "file_name": "_feature_extractor.py", "file_ext": "py", "file_size_in_byte": 2770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 79, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sklearn.preprocessing.FunctionTransformer", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "3188649649", "text": "from keras.preprocessing import image\nfrom sklearn.datasets import load_files\nfrom keras.utils import np_utils\nimport numpy as np\nfrom scipy import io\nimport scipy as sp\nfrom sklearn.utils import Bunch\nfrom tqdm import tqdm\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport numbers\n\n\ndef path_to_tensor(img_path, data_format=None):\n \"\"\"\n This method loads an image and returns it as tensor with the shape (1, 224, 224, 3) or (1, 3, 224, 244).\n :param img_path: The image file.\n :param data_format: Allows to change the data to channels first.\n :return: A tensor in the shape (1, 224, 224, 3) or (1, 3, 224, 244).\n \"\"\"\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img, data_format=data_format)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\n\ndef paths_to_tensor(img_paths, data_format=None):\n \"\"\"\n This method loads images and returns them as tensor with the shape (#, 224, 224, 3) or (#, 3, 224, 224).\n :param img_paths: A list of image paths.\n :param data_format: Allows to change the data to channels first.\n :return: A tensor in the shape (#, 224, 224, 3) or (#, 3, 244, 244).\n 
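
A pipeline-style usage sketch for the `FeatureExtractor` that closes above. `np.log1p` stands in for a real audio feature function, and the import path is inferred from the repo layout (`src/pyrcn/util/`), so treat both as assumptions:

```python
import numpy as np
from sklearn.pipeline import Pipeline

from pyrcn.util import FeatureExtractor  # import path inferred from repo layout

# Wrap a stateless function so it composes with other sklearn steps.
pipe = Pipeline([("log", FeatureExtractor(func=np.log1p))])

X = np.abs(np.random.randn(10, 4))
print(pipe.fit_transform(X).shape)  # (10, 4)
```
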
\"\"\"\n list_of_tensors = [path_to_tensor(img_path, data_format=data_format) for img_path in tqdm(img_paths)]\n return np.vstack(list_of_tensors)\n\n\ndef extract_xception(tensor):\n \"\"\"\n This method loads Xception, pre-trained with the ImageNet data set, and runs the given input against it. After that\n it returns the bottleneck features.\n :param tensor: The input values for Xception.\n :return: The bottleneck features for the given input.\n \"\"\"\n from keras.applications.xception import Xception, preprocess_input\n return Xception(weights='imagenet', include_top=False).predict(preprocess_input(tensor))\n\n\ndef load_dataset(path, onehot=True):\n \"\"\"\n This method loads an image data set from a folder structure like this:\n folder\n |- class01\n ||- image01.jpg\n ||- image02.jpg\n ||- ...\n |- class02\n ||- ...\n |- ...\n It returns a tuple with the file names and the corresponding category.\n :param path: The path to the folder.\n :param onehot: Whether to return the category as data or as onehot-encoded array.\n :return: A tuple of (files, category).\n \"\"\"\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n\n dog_targets = np.array(data['target'])\n if onehot:\n dog_targets = np_utils.to_categorical(dog_targets, 133)\n\n return dog_files, dog_targets\n\n\ndef fetch_mldata(dataname, target_name='label', data_name='data', transpose_data=True, data_home=None):\n \"\"\"Fetch an mldata.org data set\n\n mldata.org is no longer operational.\n NOTE: This is a stubbed version which can only load mnist from a local file!\n\n If the file does not exist yet, it is downloaded from mldata.org .\n\n mldata.org does not have an enforced convention for storing data or\n naming the columns in a data set. The default behavior of this function\n works well with the most common cases:\n\n 1) data values are stored in the column 'data', and target values in the\n column 'label'\n 2) alternatively, the first column stores target values, and the second\n data values\n 3) the data array is stored as `n_features x n_samples` , and thus needs\n to be transposed to match the `sklearn` standard\n\n Keyword arguments allow to adapt these defaults to specific data sets\n (see parameters `target_name`, `data_name`, `transpose_data`, and\n the examples below).\n\n mldata.org data sets may have multiple columns, which are stored in the\n Bunch object with their original name.\n\n Parameters\n ----------\n\n dataname : str\n Name of the data set on mldata.org,\n e.g.: \"leukemia\", \"Whistler Daily Snowfall\", etc.\n The raw name is automatically converted to a mldata.org URL .\n\n target_name : optional, default: 'label'\n Name or index of the column containing the target values.\n\n data_name : optional, default: 'data'\n Name or index of the column containing the data.\n\n transpose_data : optional, default: True\n If True, transpose the downloaded data array.\n\n data_home : optional, default: None\n Specify another download and cache folder for the data sets. 
By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n Returns\n -------\n\n data : Bunch\n Dictionary-like object, the interesting attributes are:\n 'data', the data to learn, 'target', the classification labels,\n 'DESCR', the full description of the dataset, and\n 'COL_NAMES', the original names of the dataset columns.\n \"\"\"\n\n # stub: always load the local MNIST matlab file\n filename = 'mnist.mat'\n\n # load dataset matlab file\n with open(filename, 'rb') as matlab_file:\n matlab_dict = io.loadmat(matlab_file, struct_as_record=True)\n\n # -- extract data from matlab_dict\n\n # flatten column names\n col_names = [str(descr[0])\n for descr in matlab_dict['mldata_descr_ordering'][0]]\n\n # if target or data names are indices, transform them into names\n if isinstance(target_name, numbers.Integral):\n target_name = col_names[target_name]\n if isinstance(data_name, numbers.Integral):\n data_name = col_names[data_name]\n\n # rules for making sense of the mldata.org data format\n # (earlier ones have priority):\n # 1) there is only one array => it is \"data\"\n # 2) there are multiple arrays\n # a) copy all columns in the bunch, using their column name\n # b) if there is a column called `target_name`, set \"target\" to it,\n # otherwise set \"target\" to first column\n # c) if there is a column called `data_name`, set \"data\" to it,\n # otherwise set \"data\" to second column\n\n dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,\n 'COL_NAMES': col_names}\n\n # 1) there is only one array => it is considered data\n if len(col_names) == 1:\n data_name = col_names[0]\n dataset['data'] = matlab_dict[data_name]\n # 2) there are multiple arrays\n else:\n for name in col_names:\n dataset[name] = matlab_dict[name]\n\n if target_name in col_names:\n del dataset[target_name]\n dataset['target'] = matlab_dict[target_name]\n else:\n del dataset[col_names[0]]\n dataset['target'] = matlab_dict[col_names[0]]\n\n if data_name in col_names:\n del dataset[data_name]\n dataset['data'] = matlab_dict[data_name]\n else:\n del dataset[col_names[1]]\n dataset['data'] = matlab_dict[col_names[1]]\n\n # set axes to scikit-learn conventions\n if transpose_data:\n dataset['data'] = dataset['data'].T\n if 'target' in dataset:\n if not sp.sparse.issparse(dataset['target']):\n dataset['target'] = dataset['target'].squeeze()\n\n return Bunch(**dataset)\n\n", "repo_name": "ChristophRaab/cpn_ba", "sub_path": "common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 7199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 10, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 10, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 22, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 26, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.applications.xception.Xception", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.applications.xception.preprocess_input", "line_number": 48, 
"usage_type": "call"}, {"api_name": "sklearn.datasets.load_files", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 72, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 139, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 139, "usage_type": "name"}, {"api_name": "numbers.Integral", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numbers.Integral", "line_number": 150, "usage_type": "attribute"}, {"api_name": "scipy.sparse.issparse", "line_number": 193, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 193, "usage_type": "attribute"}, {"api_name": "sklearn.utils.Bunch", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "24764346030", "text": "from flask import Blueprint, render_template, request, redirect, flash, url_for\nfrom flask_login import login_user, logout_user, login_required, current_user\n\nfrom app import db, bcrypt\nfrom models.User import User\nfrom forms.LoginForm import SignInForm, SignUpForm\n\naccount_app = Blueprint(\"account_app\", __name__)\n\n\n@account_app.route(\"/sign_in\", methods=[\"GET\"])\ndef sign_in():\n form = SignInForm()\n if request.method == \"GET\":\n return render_template(\"account/sign_in.jinja2\", form=form)\n\n\n@account_app.route(\"/sign_in\", methods=[\"POST\"])\ndef sign_in_create():\n form = SignInForm()\n\n if form.validate_on_submit():\n user = db.session.query(User).filter_by(\n register=form.register.data\n ).first()\n\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user)\n return redirect(url_for(\"call_app.home\"))\n else:\n flash(\"Invalid inputs\", \"danger\")\n else:\n errors = list(form.errors.values())[0][0]\n flash(errors, \"danger\")\n \n return redirect(url_for(\"account_app.sign_in\"))\n\n\n@account_app.route(\"/sign_up\", methods=[\"GET\"])\ndef sign_up():\n return render_template(\"account/sign_up.jinja2\", form=SignUpForm())\n\n\n@account_app.route(\"/sign_up\", methods=[\"POST\"])\ndef sign_up_create():\n form = SignUpForm()\n\n if form.validate_on_submit():\n try:\n user = User(\n name=form.name.data,\n register=db.session.query(User).order_by(User.register.desc()).first().register+1,\n password=bcrypt.generate_password_hash(form.password1.data),\n role=form.role.data\n )\n db.session.add(user)\n db.session.commit()\n\n flash(f\"User created (register: {user.register})\", \"success\")\n\n return redirect(url_for(\"account_app.sign_in\"))\n except Exception as e:\n flash(\"Error persisting data\", \"danger\")\n else:\n errors = list(form.errors.values())[0][0]\n flash(errors, \"danger\")\n \n return redirect(url_for(\"account_app.sign_up\"))\n\n\n@account_app.route(\"/profile\", methods=[\"GET\"])\n@login_required\ndef profile():\n form = SignUpForm()\n if request.method == \"GET\":\n form.name.data = current_user.name\n form.role.data = current_user.role.__str__()\n\n return render_template(\"account/profile.jinja2\", form=form)\n\n\n@account_app.route(\"/profile\", methods=[\"POST\"])\n@login_required\ndef profile_edit():\n form = SignUpForm()\n\n if form.validate_on_submit():\n try:\n user = db.session.query(User).filter_by(id=current_user.id).first()\n user.name=form.name.data\n 
user.password=bcrypt.generate_password_hash(form.password1.data)\n user.role=form.role.data\n db.session.commit()\n\n flash(\"User edited\", \"success\")\n return redirect(url_for(\"call_app.home\"))\n except Exception as e:\n flash(\"Error persisting data\", \"danger\")\n else:\n errors = list(form.errors.values())[0][0]\n flash(errors, \"danger\")\n\n return redirect(url_for(\"account_app.profile\"))\n \n\n@account_app.route(\"/logout\", methods=[\"GET\"])\n@login_required\ndef log_out():\n logout_user()\n flash(\"Exited\", \"success\")\n return redirect(url_for(\"account_app.sign_in\"))\n", "repo_name": "oseias-romeiro/ChamadaEletronica", "sub_path": "controllers/account.py", "file_name": "account.py", "file_ext": "py", "file_size_in_byte": 3364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "forms.LoginForm.SignInForm", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "forms.LoginForm.SignInForm", "line_number": 20, "usage_type": "call"}, {"api_name": "app.db.session.query", "line_number": 23, "usage_type": "call"}, {"api_name": "models.User.User", "line_number": 23, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 23, "usage_type": "name"}, {"api_name": "app.bcrypt.check_password_hash", "line_number": 27, "usage_type": "call"}, {"api_name": "app.bcrypt", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "forms.LoginForm.SignUpForm", "line_number": 41, "usage_type": "call"}, {"api_name": "forms.LoginForm.SignUpForm", "line_number": 46, "usage_type": "call"}, {"api_name": "models.User.User", "line_number": 50, "usage_type": "call"}, {"api_name": "app.db.session.query", "line_number": 52, "usage_type": "call"}, {"api_name": "models.User.User", "line_number": 52, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 52, "usage_type": "name"}, {"api_name": "models.User.User.register.desc", "line_number": 52, "usage_type": "call"}, {"api_name": "models.User.User.register", "line_number": 52, "usage_type": "attribute"}, {"api_name": "app.bcrypt.generate_password_hash", "line_number": 53, "usage_type": "call"}, {"api_name": "app.bcrypt", "line_number": 53, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 56, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 56, "usage_type": "name"}, {"api_name": "app.db.session.commit", 
"line_number": 57, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "forms.LoginForm.SignUpForm", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 76, "usage_type": "name"}, {"api_name": "flask_login.current_user.role.__str__", "line_number": 77, "usage_type": "call"}, {"api_name": "flask_login.current_user.role", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 72, "usage_type": "name"}, {"api_name": "forms.LoginForm.SignUpForm", "line_number": 85, "usage_type": "call"}, {"api_name": "app.db.session.query", "line_number": 89, "usage_type": "call"}, {"api_name": "models.User.User", "line_number": 89, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 89, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 89, "usage_type": "name"}, {"api_name": "app.bcrypt.generate_password_hash", "line_number": 91, "usage_type": "call"}, {"api_name": "app.bcrypt", "line_number": 91, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 93, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 93, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 103, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 83, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 111, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "73576133954", "text": "import os\n\nimport sys\n\nimport openai\n\nfrom 
langchain.chains import ConversationalRetrievalChain, RetrievalQA\n\nfrom langchain.chat_models import ChatOpenAI\n\nfrom langchain.document_loaders import DirectoryLoader, TextLoader\n\nfrom langchain.embeddings import OpenAIEmbeddings\n\nfrom langchain.indexes import VectorstoreIndexCreator\n\nfrom langchain.indexes.vectorstore import VectorStoreIndexWrapper\n\nfrom langchain.llms import OpenAI\n\nfrom langchain.vectorstores import Chroma\nimport streamlit as st\n\n#from constants import API_KEY\n\nos.environ[\"OPENAI_API_KEY\"] = st.secrets[\"OPENAI_API_KEY\"]\n\n \n\n# Enable to save to disk & reuse the model (for repeated queries on the same data)\n\nPERSIST = False\n\n \nquery = None\n\nif len(sys.argv) > 1:\n\n query = sys.argv[1]\n\n \n\nif PERSIST and os.path.exists(\"persist\"):\n\n print(\"Reusing index...\\n\")\n\n vectorstore = Chroma(persist_directory=\"persist\", embedding_function=OpenAIEmbeddings())\n\n index = VectorStoreIndexWrapper(vectorstore=vectorstore)\n\nelse:\n\n loader = TextLoader(\"water.txt\") # Use this line if you only need data.txt\n\n #loader = DirectoryLoader(os.path.join(os.getcwd(), 'data'))\n\n if PERSIST:\n\n index = VectorstoreIndexCreator(vectorstore_kwargs={\"persist_directory\":\"persist\"}).from_loaders([loader])\n\n else:\n\n index = VectorstoreIndexCreator().from_loaders([loader])\n\n \nllm = OpenAI(temperature=0,model_name='')\nchain = ConversationalRetrievalChain.from_llm(\n\n llm=llm,\n\n retriever=index.vectorstore.as_retriever(search_kwargs={\"k\": 1}),\n\n)\nst.title(\"Schneider Helpline\")\nif \"messages\" not in st.session_state:\n st.session_state.messages = []\n \nfor message in st.session_state.messages:\n with st.chat_message(message[\"role\"]):\n st.markdown(message[\"content\"])\n \nprompt = st.chat_input(\"type your question here\")\nif prompt:\n with st.chat_message(\"user\"):\n st.markdown(prompt)\n st.session_state.messages.append({\"role\":\"user\", \"content\": prompt})\n response = index.query(prompt)\n if \"I don't know\" in response:\n response = \"Sorry, I do not know the answer. 
Please contact your Schneider PSP for more details!\"\n with st.chat_message(\"assistant\"):\n st.markdown(response)\n st.session_state.messages.append({\"role\":\"assistant\", \"content\": response})\n \n \n \n \n \n \n \n\n", "repo_name": "divya-g13/streamlit", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "streamlit.secrets", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "langchain.vectorstores.Chroma", "line_number": 47, "usage_type": "call"}, {"api_name": "langchain.embeddings.OpenAIEmbeddings", "line_number": 47, "usage_type": "call"}, {"api_name": "langchain.indexes.vectorstore.VectorStoreIndexWrapper", "line_number": 49, "usage_type": "call"}, {"api_name": "langchain.document_loaders.TextLoader", "line_number": 53, "usage_type": "call"}, {"api_name": "langchain.indexes.VectorstoreIndexCreator", "line_number": 59, "usage_type": "call"}, {"api_name": "langchain.indexes.VectorstoreIndexCreator", "line_number": 63, "usage_type": "call"}, {"api_name": "langchain.llms.OpenAI", "line_number": 66, "usage_type": "call"}, {"api_name": "langchain.chains.ConversationalRetrievalChain.from_llm", "line_number": 67, "usage_type": "call"}, {"api_name": "langchain.chains.ConversationalRetrievalChain", "line_number": 67, "usage_type": "name"}, {"api_name": "streamlit.title", "line_number": 74, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 75, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 76, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 78, "usage_type": "attribute"}, {"api_name": "streamlit.chat_message", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.chat_input", "line_number": 82, "usage_type": "call"}, {"api_name": "streamlit.chat_message", "line_number": 84, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.session_state.messages.append", "line_number": 86, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 86, "usage_type": "attribute"}, {"api_name": "streamlit.chat_message", "line_number": 90, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 91, "usage_type": "call"}, {"api_name": "streamlit.session_state.messages.append", "line_number": 92, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 92, "usage_type": "attribute"}]} +{"seq_id": "39425063982", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef arange(t_start, t_end, step):\n ar = np.arange(t_start, t_end, step)\n return np.append(ar, ar[-1] + step) if ar[-1] < t_end else ar\n\ndef sinewave(amp, freq, freq_s, t_start, t_end):\n '''\n Returns an array with values of a sinewave waveform and displays it on a plot.\n '''\n\n # preparing timelines\n _freq_c = 44100 if 44100 > freq_s else freq_s * 2\n\n t = arange(t_start, t_end, (1 / 
freq_s))\n t_c = arange(t_start, t_end, (1 / _freq_c))\n\n # generating waveforms\n wave = amp * np.sin(2 * np.pi * freq * t)\n wave_c = amp * np.sin(2 * np.pi * freq * t_c)\n \n # plotting\n plt.plot(t, wave, 'ro', t_c, wave_c, 'b')\n plt.title(f'Sine function (number of periods: {(t_end - t_start) * freq})')\n plt.xlabel('Time, s')\n plt.ylabel('Value')\n plt.grid(True)\n plt.show()\n\n return wave\n\ndef main():\n x = sinewave(2, 2, 6, -2, 1)\n print(x)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "kbuchman/Scripts-from-studies", "sub_path": "Signal Processing/Lab 1/task_1.2.py", "file_name": "task_1.2.py", "file_ext": "py", "file_size_in_byte": 1003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.arange", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "4169652233", "text": "import logging\nimport theano\nfrom theano.gradient import disconnected_grad\nfrom theano import tensor\n\nfrom blocks.graph import ComputationGraph\nfrom blocks.filter import VariableFilter\nfrom blocks.bricks import Linear, NDimensionalSoftmax\nfrom blocks.bricks.base import application\nfrom blocks.roles import OUTPUT, add_role, WEIGHT\nfrom blocks.utils import dict_subset, shared_floatx_nans\nfrom blocks_extras.bricks.sequence_generator2 import SoftmaxReadout, MergeReadout\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReinforceReadout(SoftmaxReadout):\n\n def __init__(self, reward_brick, entropy=None, **kwargs):\n super(ReinforceReadout, self).__init__(**kwargs)\n self.reward_brick = reward_brick\n self.entropy_coof = entropy\n\n self.value_prediction = Linear(output_dim=1, name='value_prediction')\n\n self.children += [\n reward_brick, self.value_prediction]\n\n self.costs.inputs += ['attended', 'attended_mask']\n\n def _push_allocation_config(self):\n super(ReinforceReadout, self)._push_allocation_config()\n self.value_prediction.input_dim = self.get_dim('states')\n\n @application\n def costs(self, application_call, prediction, prediction_mask,\n groundtruth, groundtruth_mask,\n **inputs):\n states = disconnected_grad(inputs['states'])\n\n merged = self.merge(**dict_subset(inputs, self.merge_names))\n # Compute 
log-probabilities for the predicted tokens\n log_probs = -self.all_scores(prediction, merged) * prediction_mask\n # Compute per-token rewards\n rewards = self.reward_brick.apply(prediction, prediction_mask,\n groundtruth, groundtruth_mask).sum(axis=-1)\n # Encourage entropy by adding negated log-probs to the rewards\n application_call.add_auxiliary_variable(log_probs, name='log_probs')\n if self.entropy_coof:\n rewards += self.entropy_coof * disconnected_grad(-log_probs)\n\n future_rewards = rewards[::-1].cumsum(axis=0)[::-1]\n\n baselines = self.value_prediction.apply(states)[:, :, 0]\n application_call.add_auxiliary_variable(\n baselines, name='baselines')\n # Compute baseline error\n centered_future_rewards = future_rewards - baselines\n baseline_errors = (\n (centered_future_rewards *\n disconnected_grad(prediction_mask)) ** 2).sum(axis=0)\n application_call.add_auxiliary_variable(\n baseline_errors, name='baseline_errors')\n\n # The gradient of this will be the REINFORCE 1-sample\n # gradient estimate\n costs = (disconnected_grad(centered_future_rewards)\n * log_probs\n * prediction_mask).sum(axis=0)\n\n # Add auxiliary variables for intermediate steps of the computation\n application_call.add_auxiliary_variable(\n rewards, name='rewards')\n application_call.add_auxiliary_variable(\n log_probs.copy(), name='prediction_log_probs')\n\n return costs\n\n\nclass CriticReadout(MergeReadout):\n\n def __init__(self, num_tokens,\n value_softmax=False, same_value_for_wrong=False,\n groundtruth_word_bonus=False, dueling_outputs=False, **kwargs):\n self.value_softmax = value_softmax\n self.same_value_for_wrong = same_value_for_wrong\n self.groundtruth_word_bonus = groundtruth_word_bonus\n self.dueling_outputs = dueling_outputs\n super(CriticReadout, self).__init__(post_merge_dim=num_tokens, **kwargs)\n self.costs.inputs = ([\n 'prediction', 'prediction_mask',\n 'groundtruth', 'groundtruth_mask']\n + self.input_names)\n\n def _allocate(self):\n w = shared_floatx_nans((self.get_dim('states'),), name='add_weights')\n add_role(w, WEIGHT)\n self.parameters.append(w)\n\n def _initialize(self):\n self.weights_init.initialize(self.parameters[0], self.rng)\n\n # For compatibility with Blocks-extras\n def sample(self):\n raise NotImplementedError()\n\n # For compatibility with Blocks-extras\n def scores(self):\n pass\n\n @application\n def costs(self, prediction, prediction_mask,\n groundtruth, groundtruth_mask, **inputs):\n outputs = self.all_outputs(groundtruth, groundtruth_mask, **inputs)\n # It does not matter what we return here, as long as it contains\n # the values in the computation graph.\n return outputs.sum()\n\n @application\n def all_outputs(self, application_call, groundtruth, groundtruth_mask, **inputs):\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n indices = tensor.repeat(\n tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])\n if self.value_softmax:\n logger.debug('Applying value softmax')\n outputs = (tensor.addbroadcast(outputs[:, :, :1], 2)\n + self.softmax.apply(outputs[:, :, 1:], extra_ndim=1))\n if self.same_value_for_wrong:\n logger.debug('Same value for apriori wrong actions')\n wrong_output = outputs[:, :, 0]\n outputs = outputs[:, :, 1:]\n wrong_mask = tensor.ones_like(outputs[0])\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n outputs = (outputs * (1 - wrong_mask)\n + wrong_output[:, :, None] * wrong_mask)\n application_call.add_auxiliary_variable(wrong_mask, name='wrong_mask')\n if 
self.groundtruth_word_bonus:\n logger.debug('Bonus for groundtruth words')\n wrong_mask = tensor.ones_like(outputs[0])\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n w, = self.parameters\n bonuses = inputs['states'].dot(w)\n outputs += bonuses[:, :, None] * (1 - wrong_mask)[None, :, :]\n if self.dueling_outputs:\n logger.debug('Dueling outputs a-la dueling networks')\n base_output = outputs[:, :, [0]]\n dueling_outputs = outputs[:, :, 1:]\n outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=2, keepdims=True)\n return outputs\n\n @application\n def outputs(self, groundtruth, groundtruth_mask, **inputs):\n # Copy-pasted from all_outputs, because Theano does not support ellipsis\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n indices = tensor.repeat(\n tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])\n if self.value_softmax:\n logger.debug('Applying value softmax')\n outputs = (tensor.addbroadcast(outputs[:, :1], 1)\n + self.softmax.apply(outputs[:, 1:]))\n if self.same_value_for_wrong:\n logger.debug('Same value for apriori wrong actions')\n wrong_output = outputs[:, 0]\n outputs = outputs[:, 1:]\n wrong_mask = tensor.ones_like(outputs)\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n outputs = (outputs * (1 - wrong_mask)\n + wrong_output[:, None] * wrong_mask)\n if self.groundtruth_word_bonus:\n logger.debug('Bonus for groundtruth words')\n wrong_mask = tensor.ones_like(outputs)\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n w, = self.parameters\n bonuses = inputs['states'].dot(w)\n outputs = outputs + bonuses[:, None] * (1 - wrong_mask)\n if self.dueling_outputs:\n logger.debug('Dueling outputs a-la dueling networks')\n base_output = outputs[:, [0]]\n dueling_outputs = outputs[:, 1:]\n outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=1, keepdims=True)\n return outputs\n\n\nclass ActorCriticReadout(SoftmaxReadout):\n \"\"\"Actor-critic\n\n Parameters\n ----------\n bos_token : int\n The token used to pad critic input. 
Critic needs to do\n at least one extra step compared to the actor in order\n to get the first glimpse of the ground-truth sequence\n before predicting the actual values.\n\n \"\"\"\n def __init__(self, reward_brick,\n compute_targets, solve_bellman,\n freeze_actor, freeze_critic, critic_uses_actor_states,\n critic_uses_groundtruth,\n critic=None, critic_burnin_steps=None,\n critic_loss=None,\n critic_policy_t=None,\n entropy_reward_coof=None, cross_entropy_reward_coof=None,\n trpo_coef=None,\n discount=None,\n value_penalty=None, value_penalty_type=None,\n accumulate_outputs=False, use_value_biases=None,\n actor_grad_estimate=None,\n bos_token=None,\n **kwargs):\n super(ActorCriticReadout, self).__init__(**kwargs)\n self.reward_brick = reward_brick\n self.critic = critic\n self.freeze_actor = freeze_actor\n self.freeze_critic = freeze_critic\n self.critic_uses_actor_states = critic_uses_actor_states\n self.critic_uses_groundtruth = (\n critic_uses_groundtruth if critic_uses_groundtruth is not None else True)\n self.critic_burnin_steps = (\n critic_burnin_steps if critic_burnin_steps is not None else 0)\n self.critic_loss = (\n critic_loss if critic_loss is not None else \"L2\")\n self.value_summand = Linear(output_dim=1, name='summand')\n self.softmax_t = 1.\n self.critic_policy_t = (\n critic_policy_t if critic_policy_t is not None else 1.0)\n self.epsilon = 0.\n self.discount = (\n discount if discount is not None else 1.)\n self.entropy_reward_coof = (\n entropy_reward_coof if entropy_reward_coof is not None else 0.)\n self.cross_entropy_reward_coof = (\n cross_entropy_reward_coof if cross_entropy_reward_coof is not None else 0.)\n self.trpo_coef = (\n trpo_coef if trpo_coef is not None else 0.)\n self.value_penalty = value_penalty\n self.value_penalty_type = (\n value_penalty_type if value_penalty_type is not None else \"L2\")\n self.compute_targets = compute_targets\n self.solve_bellman = solve_bellman\n self.accumulate_outputs = accumulate_outputs\n self.use_value_biases = (\n use_value_biases if use_value_biases is not None else True)\n self.actor_grad_estimate = (\n actor_grad_estimate if actor_grad_estimate else 'all_actions')\n self.bos_token = bos_token\n self.softmax = NDimensionalSoftmax()\n self.children += [reward_brick, self.value_summand, self.softmax]\n if self.critic:\n self.children.append(self.critic)\n self.costs.inputs += ['attended', 'attended_mask']\n\n def _push_allocation_config(self):\n super(ActorCriticReadout, self)._push_allocation_config()\n self.value_summand.input_dim = self.get_dim('attended')\n\n @application\n def scores(self, **inputs):\n merged = self.merge(**dict_subset(inputs, self.merge_names))\n return self.softmax.log_probabilities(\n merged * self.softmax_t, extra_ndim=merged.ndim - 2)\n\n @application\n def costs(self, application_call, prediction, prediction_mask,\n groundtruth, groundtruth_mask,\n **inputs):\n def _prediction_subtensor(data):\n if data.ndim != 3:\n raise ValueError\n flat_data = data.reshape((\n data.shape[0] * data.shape[1],\n data.shape[2]))\n flat_data = flat_data[\n tensor.arange(flat_data.shape[0]), prediction.flatten()]\n return flat_data.reshape((\n prediction.shape[0], prediction.shape[1]))\n\n attended = disconnected_grad(inputs.pop('attended'))\n attended_mask = disconnected_grad(inputs.pop('attended_mask'))\n\n # Compute the rewards\n rewards = self.reward_brick.apply(\n prediction, prediction_mask,\n groundtruth, groundtruth_mask)[:, :, 0]\n future_rewards = rewards[::-1].cumsum(axis=0)[::-1]\n\n # Compute the 
critic outputs\n if self.critic:\n padding = tensor.repeat(\n tensor.fill(prediction[0:1], self.bos_token), 1, axis=0)\n mask_padding = tensor.repeat(\n tensor.fill(prediction_mask[0:1], 1.), 1, axis=0)\n padded_prediction = tensor.concatenate([padding, prediction])\n padded_prediction_mask = tensor.concatenate([mask_padding, prediction_mask])\n if self.critic_uses_groundtruth:\n critic_context = groundtruth\n critic_context_mask = groundtruth_mask\n else:\n critic_context = tensor.zeros_like(groundtruth[0:1])\n critic_context_mask = tensor.zeros_like(groundtruth_mask[0:1])\n critic_kwargs = dict(\n prediction=padded_prediction, prediction_mask=padded_prediction_mask,\n groundtruth=critic_context, groundtruth_mask=critic_context_mask,\n inputs=critic_context, inputs_mask=critic_context_mask)\n\n if self.critic_uses_actor_states:\n extra_inputs = disconnected_grad(inputs['states'])\n # We don't need the very last hidden state of the actor\n # in extra_inputs. We have to add something instead for the shapes\n # to match. It doesn't matter at all, what exactly we add.\n critic_kwargs['extra_inputs'] = tensor.concatenate(\n [extra_inputs, tensor.zeros_like(extra_inputs[0:1])])\n critic_cg = ComputationGraph(self.critic.costs(**critic_kwargs))\n outputs, = VariableFilter(\n applications=[self.critic.generator.readout.all_outputs],\n roles=[OUTPUT])(critic_cg)\n # The first subtensor should be discarded, because it was outputted\n # for the padding. In addition to that Q-values from the first\n # 'critic_burnin_steps' will be ignored, see later in the code.\n outputs = outputs[1:]\n else:\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n prediction_outputs = _prediction_subtensor(outputs)\n\n # Compute Q adjustments\n adjustments = outputs\n prediction_adjustments = prediction_outputs\n if self.accumulate_outputs:\n prediction_adjustments = prediction_outputs.cumsum(axis=0)\n adjustments = tensor.inc_subtensor(\n adjustments[1:], prediction_adjustments[:-1][:, :, None])\n\n # Compute shared additive biases for all Q values\n if self.use_value_biases:\n value_biases = (\n self.value_summand.apply(attended)[:, :, 0]\n * attended_mask).sum(axis=0)\n else:\n value_biases = tensor.zeros_like(adjustments[0, :, 0])\n values = adjustments + value_biases[None, :, None]\n prediction_values = prediction_adjustments + value_biases[None, :]\n\n rolled_prediction_mask = tensor.roll(prediction_mask, -1, axis=0)\n rolled_prediction_mask = tensor.set_subtensor(\n rolled_prediction_mask[-1], 0)\n\n # Compute probabilities\n logs = self.scores(use_epsilon=False, **inputs)\n probs = tensor.exp(logs)\n if self.trpo_coef:\n logger.debug(\"Using TRPO coefficient of {}\".format(self.trpo_coef))\n old_probs = tensor.tensor3('probs')\n else:\n old_probs = tensor.zeros_like(probs)\n prediction_logs = _prediction_subtensor(logs)\n\n # Compute value targets\n value_targets = (disconnected_grad(probs) * values).sum(axis=-1)\n value_targets = tensor.roll(value_targets, -1, axis=0)\n value_targets = (self.discount * value_targets * rolled_prediction_mask\n + rewards)\n value_targets = value_targets.astype(theano.config.floatX)\n\n total_costs = 0\n\n # Compute critic cost\n if not self.compute_targets:\n logger.debug(\"Using given targets\")\n value_targets = tensor.matrix('value_targets')\n if self.solve_bellman == 'no':\n logger.debug(\"Not solving Bellman, just predicting the rewards\")\n value_targets = rewards.copy(name='value_targets')\n elif self.solve_bellman == 'without_dp':\n future_rewards = 
rewards[::-1].cumsum(axis=0)[::-1]\n logger.debug(\"Solving Bellman, but without DP\")\n value_targets = future_rewards\n elif self.solve_bellman is not True:\n raise ValueError()\n critic_errors = prediction_values - value_targets\n if self.critic_loss == 'L2':\n logger.debug(\"L2 loss for the critic\")\n critic_costs_per_char = critic_errors ** 2 * prediction_mask\n elif self.critic_loss == 'huber':\n logger.debug(\"Huber loss for the critic\")\n use_L2 = tensor.lt(abs(critic_errors), 0.5)\n critic_costs_per_char = (use_L2 * critic_errors ** 2 +\n (1 - use_L2) * abs(critic_errors)) * prediction_mask\n else:\n raise ValueError()\n critic_costs = critic_costs_per_char[self.critic_burnin_steps:].sum(axis=0)\n if not self.freeze_critic:\n total_costs += critic_costs\n\n # Compute critic Monte-Carlo cost\n critic_monte_carlo_costs = (\n (((prediction_values - future_rewards) ** 2) * prediction_mask)\n [self.critic_burnin_steps:].sum(axis=0))\n\n # Value penalty\n if self.value_penalty:\n logger.debug(\"Use value penalty\")\n if self.value_penalty_type == 'L2':\n value_deviations = (values - values.mean(axis=-1, keepdims=True)) ** 2\n elif self.value_penalty_type == 'L1':\n value_deviations = abs(values - values.mean(axis=-1, keepdims=True))\n else:\n raise ValueError(\"unknown value penalty type {}\".format(self.value_penalty_type))\n if not self.freeze_critic:\n total_costs += (\n self.value_penalty *\n (value_deviations.sum(axis=-1) * prediction_mask)\n [self.critic_burnin_steps:].sum(axis=0))\n\n # Compute actor cost\n if self.critic:\n # The actor cost will be minimized, that's why values\n # must be negated.\n est_name = self.actor_grad_estimate\n if est_name == 'all_actions':\n disadvantages = disconnected_grad(\n values.max(axis=-1)[:, :, None] - values)\n actor_costs = ((probs * disadvantages).sum(axis=-1)\n * prediction_mask)\n actor_costs = actor_costs[self.critic_burnin_steps:]\n elif est_name.startswith('1_action'):\n # Here we do not provide a target for the first step for\n # the reason we lack an estimate of the value of the initial state.\n # This is how our critic works.\n # Hopefully the network won't unlearn\n # to produce a BOS first.\n future_reward_estimate = (future_rewards\n if est_name.endswith('unbiased')\n else prediction_values)\n weights = -disconnected_grad(\n future_reward_estimate[1:] + rewards[:-1] - prediction_values[:-1])\n actor_costs = ((prediction_logs[1:] * weights) * prediction_mask[1:])\n actor_costs = actor_costs[self.critic_burnin_steps + 1:]\n else:\n raise ValueError\n actor_costs = actor_costs.sum(axis=0)\n\n actor_entropies = (probs * -logs).sum(axis=-1) * prediction_mask\n actor_entropies = actor_entropies[self.critic_burnin_steps:].sum(axis=0)\n old_actor_cross_entropies = (old_probs * -logs).sum(axis=-1) * prediction_mask\n old_actor_cross_entropies = old_actor_cross_entropies[self.critic_burnin_steps:].sum(axis=0)\n critic_policy = disconnected_grad(\n self.softmax.apply(self.critic_policy_t * values, extra_ndim=1))\n critic_cross_entropies = (\n (critic_policy * -logs).sum(axis=-1)\n * prediction_mask)\n critic_cross_entropies = critic_cross_entropies[self.critic_burnin_steps:].sum(axis=0)\n actor_costs_with_penalties = (\n actor_costs\n - self.entropy_reward_coof * actor_entropies\n # But really, should it be minus here, below?\n - self.cross_entropy_reward_coof * critic_cross_entropies\n + self.trpo_coef * old_actor_cross_entropies)\n if not self.freeze_actor:\n total_costs += actor_costs_with_penalties\n else:\n total_costs += 
disconnected_grad(actor_costs_with_penalties)\n\n # Add auxiliary variables for intermediate steps of the computation\n application_call.add_auxiliary_variable(\n rewards, name='rewards')\n application_call.add_auxiliary_variable(\n value_biases, name='value_biases')\n application_call.add_auxiliary_variable(\n values.copy(), name='values')\n application_call.add_auxiliary_variable(\n outputs.copy(), name='outputs')\n application_call.add_auxiliary_variable(\n prediction_values, name='prediction_values')\n application_call.add_auxiliary_variable(\n prediction_outputs, name='prediction_outputs')\n application_call.add_auxiliary_variable(\n value_targets.copy(), name='value_targets')\n application_call.add_auxiliary_variable(\n probs.copy(), name='probs')\n application_call.add_auxiliary_variable(\n prediction_logs, name='prediction_log_probs')\n\n # Compute some statistics for debugging\n last_character_mask = prediction_mask - rolled_prediction_mask\n last_character_costs = (critic_costs_per_char * last_character_mask).sum(axis=0)\n mean2_output = (\n ((prediction_outputs ** 2) * prediction_mask).sum()\n / prediction_mask.sum()) ** 0.5\n max_output = abs(prediction_outputs * prediction_mask).max()\n expected_reward = (probs[0] * values[0]).sum(axis=-1)\n application_call.add_auxiliary_variable(\n last_character_costs, name='last_character_costs')\n application_call.add_auxiliary_variable(\n critic_costs.mean(), name='mean_critic_cost')\n application_call.add_auxiliary_variable(\n critic_monte_carlo_costs.mean(), name='mean_critic_monte_carlo_cost')\n if self.critic:\n application_call.add_auxiliary_variable(\n actor_costs.mean(), name='mean_actor_cost')\n application_call.add_auxiliary_variable(\n actor_entropies.mean(), name='mean_actor_entropy')\n application_call.add_auxiliary_variable(\n expected_reward.mean(), name='mean_expected_reward')\n application_call.add_auxiliary_variable(\n mean2_output, name='mean2_output')\n application_call.add_auxiliary_variable(\n max_output, name='max_output')\n\n return total_costs\n", "repo_name": "rizar/actor-critic-public", "sub_path": "lvsr/bricks/readouts.py", "file_name": "readouts.py", "file_ext": "py", "file_size_in_byte": 23722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 166, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "blocks_extras.bricks.sequence_generator2.SoftmaxReadout", "line_number": 17, "usage_type": "name"}, {"api_name": "blocks.bricks.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 39, "usage_type": "call"}, {"api_name": "blocks.utils.dict_subset", "line_number": 41, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 50, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 61, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 67, "usage_type": "call"}, {"api_name": "blocks.bricks.base.application", "line_number": 35, "usage_type": "name"}, {"api_name": "blocks_extras.bricks.sequence_generator2.MergeReadout", "line_number": 80, "usage_type": "name"}, {"api_name": "blocks.utils.shared_floatx_nans", "line_number": 96, "usage_type": "call"}, {"api_name": "blocks.roles.add_role", "line_number": 97, "usage_type": "call"}, {"api_name": "blocks.roles.WEIGHT", "line_number": 97, "usage_type": "argument"}, {"api_name": "blocks.bricks.base.application", 
"line_number": 111, "usage_type": "name"}, {"api_name": "blocks.utils.dict_subset", "line_number": 121, "usage_type": "call"}, {"api_name": "theano.tensor.repeat", "line_number": 122, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 122, "usage_type": "name"}, {"api_name": "theano.tensor.arange", "line_number": 123, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 123, "usage_type": "name"}, {"api_name": "theano.tensor.addbroadcast", "line_number": 126, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 126, "usage_type": "name"}, {"api_name": "theano.tensor.ones_like", "line_number": 132, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 132, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 133, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 133, "usage_type": "name"}, {"api_name": "theano.tensor.ones_like", "line_number": 140, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 140, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 141, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 141, "usage_type": "name"}, {"api_name": "blocks.bricks.base.application", "line_number": 119, "usage_type": "name"}, {"api_name": "blocks.utils.dict_subset", "line_number": 156, "usage_type": "call"}, {"api_name": "theano.tensor.repeat", "line_number": 157, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 157, "usage_type": "name"}, {"api_name": "theano.tensor.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 158, "usage_type": "name"}, {"api_name": "theano.tensor.addbroadcast", "line_number": 161, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 161, "usage_type": "name"}, {"api_name": "theano.tensor.ones_like", "line_number": 167, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 167, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 168, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 168, "usage_type": "name"}, {"api_name": "theano.tensor.ones_like", "line_number": 174, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 174, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 175, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 175, "usage_type": "name"}, {"api_name": "blocks.bricks.base.application", "line_number": 153, "usage_type": "name"}, {"api_name": "blocks_extras.bricks.sequence_generator2.SoftmaxReadout", "line_number": 188, "usage_type": "name"}, {"api_name": "blocks.bricks.Linear", "line_number": 227, "usage_type": "call"}, {"api_name": "blocks.bricks.NDimensionalSoftmax", "line_number": 251, "usage_type": "call"}, {"api_name": "blocks.utils.dict_subset", "line_number": 263, "usage_type": "call"}, {"api_name": "blocks.bricks.base.application", "line_number": 261, "usage_type": "name"}, {"api_name": "theano.tensor.arange", "line_number": 278, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 278, "usage_type": "name"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 282, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 283, "usage_type": "call"}, {"api_name": "theano.tensor.repeat", "line_number": 293, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 293, "usage_type": "name"}, {"api_name": 
"theano.tensor.fill", "line_number": 294, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 294, "usage_type": "name"}, {"api_name": "theano.tensor.repeat", "line_number": 295, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 295, "usage_type": "name"}, {"api_name": "theano.tensor.fill", "line_number": 296, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 296, "usage_type": "name"}, {"api_name": "theano.tensor.concatenate", "line_number": 297, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 297, "usage_type": "name"}, {"api_name": "theano.tensor.concatenate", "line_number": 298, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 298, "usage_type": "name"}, {"api_name": "theano.tensor.zeros_like", "line_number": 303, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 303, "usage_type": "name"}, {"api_name": "theano.tensor.zeros_like", "line_number": 304, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 304, "usage_type": "name"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 311, "usage_type": "call"}, {"api_name": "theano.tensor.concatenate", "line_number": 315, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 315, "usage_type": "name"}, {"api_name": "theano.tensor.zeros_like", "line_number": 316, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 316, "usage_type": "name"}, {"api_name": "blocks.graph.ComputationGraph", "line_number": 317, "usage_type": "call"}, {"api_name": "blocks.filter.VariableFilter", "line_number": 318, "usage_type": "call"}, {"api_name": "blocks.roles.OUTPUT", "line_number": 320, "usage_type": "name"}, {"api_name": "blocks.utils.dict_subset", "line_number": 326, "usage_type": "call"}, {"api_name": "theano.tensor.inc_subtensor", "line_number": 334, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 334, "usage_type": "name"}, {"api_name": "theano.tensor.zeros_like", "line_number": 343, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 343, "usage_type": "name"}, {"api_name": "theano.tensor.roll", "line_number": 347, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 347, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 348, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 348, "usage_type": "name"}, {"api_name": "theano.tensor.exp", "line_number": 353, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 353, "usage_type": "name"}, {"api_name": "theano.tensor.tensor3", "line_number": 356, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 356, "usage_type": "name"}, {"api_name": "theano.tensor.zeros_like", "line_number": 358, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 358, "usage_type": "name"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 362, "usage_type": "call"}, {"api_name": "theano.tensor.roll", "line_number": 363, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 363, "usage_type": "name"}, {"api_name": "theano.config", "line_number": 366, "usage_type": "attribute"}, {"api_name": "theano.tensor.matrix", "line_number": 373, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 373, "usage_type": "name"}, {"api_name": "theano.tensor.lt", "line_number": 389, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 389, "usage_type": "name"}, {"api_name": 
"theano.gradient.disconnected_grad", "line_number": 424, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 438, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 450, "usage_type": "call"}, {"api_name": "theano.gradient.disconnected_grad", "line_number": 465, "usage_type": "call"}, {"api_name": "blocks.bricks.base.application", "line_number": 267, "usage_type": "name"}]} +{"seq_id": "28523126010", "text": "import base64\nimport json\n\nfrom flask import Flask, render_template, request\n\nfrom nn import main as predict\n\napp = Flask(__name__)\n\nstock_list = [\n \"MARUTI.NS\", \"BRITANNIA.NS\", \"BHARTIARTL.NS\", \"GRASIM.NS\", \"COALINDIA.NS\",\n \"KOTAKBANK.NS\", \"INDUSINDBK.NS\", \"HDFCLIFE.NS\", \"ITC.NS\", \"RELIANCE.NS\",\n \"TITAN.NS\", \"TCS.NS\", \"BAJAJFINSV.NS\", \"TATASTEEL.NS\", \"BAJAJFINANCE.NS\",\n \"BAJAJ-AUTO.NS\", \"NTPC.NS\", \"LT.NS\", \"HEROMOTOCO.NS\", \"ICICIBANK.NS\",\n \"SHREECEM.NS\", \"TECHM.NS\", \"TATACONSUM.NS\", \"ONGC.NS\", \"NESTLEIND.NS\",\n \"CIPLA.NS\", \"ULTRACEMCO.NS\", \"HINDALCO.NS\", \"MM.NS\", \"WIPRO.NS\", \"BTC-INR\"\n]\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"GET\":\n return render_template(\"index.html\", stock_list=stock_list, data={})\n stock_name = request.form[\"stock_name\"]\n end_date = request.form[\"end_date\"]\n data = json.loads(predict(stock_name, end_date))\n with open(\"static/plot.png\", \"rb\") as image_file:\n plot_img = base64.b64encode(image_file.read()).decode()\n return render_template(\"index.html\",\n stock_list=stock_list,\n plot_img_b64=plot_img,\n data=data)\n", "repo_name": "decimalpack/Stock-RNN", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "nn.main", "line_number": 26, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "3582772308", "text": "import argparse\nimport ray\nimport ray.tune as tune\nfrom ray.rllib.agents import ppo\nfrom gym_hls.envs.hls_env import HLSEnv\nfrom gym_hls.envs.hls_multi_env import HLSMultiEnv\nparser = argparse.ArgumentParser()\nparser.add_argument('--checkpoint_dir', '-cpd', type=str)\nargs = parser.parse_args()\nray.init()\n\nenv_config = {\n 'verbose': True,\n 'feature_type':'act_pgm'\n }\nconfig_restore = {\n \"sample_batch_size\": 50,\n \"train_batch_size\": 200,\n \"sgd_minibatch_size\": 40,\n #\"model\": {\"use_lstm\": True},\n \"horizon\": 45,\n \"num_gpus\": 2,\n \"num_workers\": 7,\n #\"lr\": tune.grid_search([0.01, 0.001, 0.0001]),\n \"env_config\": env_config,\n 
}\ntune.run_experiments({\n \"restore_ppo\": {\n \"run\": \"PPO\",\n \"env\": HLSMultiEnv,\n \"restore\": args.checkpoint_dir,\n \"checkpoint_freq\":10,\n \"config\": config_restore\n },\n})\n", "repo_name": "ucb-bar/autophase", "sub_path": "algos/rl/restore.py", "file_name": "restore.py", "file_ext": "py", "file_size_in_byte": 977, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "ray.init", "line_number": 10, "usage_type": "call"}, {"api_name": "ray.tune.run_experiments", "line_number": 27, "usage_type": "call"}, {"api_name": "ray.tune", "line_number": 27, "usage_type": "name"}, {"api_name": "gym_hls.envs.hls_multi_env.HLSMultiEnv", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "1220309332", "text": "import datetime\nimport os\nimport random\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog, messagebox\nfrom scrapy.crawler import CrawlerRunner\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.spiderloader import SpiderLoader\nfrom scrapy.utils import project\nfrom scrapy.utils.log import configure_logging\nfrom twisted.internet import reactor\nimport threading\n\nclass PrintLogger(object): # create file like object\n def __init__(self, textbox): # pass reference to text widget\n self.textbox = textbox # keep ref\n\n def write(self, text): # append captured text to the textbox\n self.textbox.insert(tk.END, text) # write text to textbox\n\n def flush(self): # needed for file like object\n pass\ndef user_agents():\n ua_list = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',\n\n ]\n return random.choice(ua_list)\n\nclass App(tk.Tk):\n def __init__(self):\n super().__init__()\n self.title('Yellow Pages Scraper')\n self.geometry('400x200')\n self.resizable(0, 0)\n self.location = tk.StringVar(self)\n self.distance_update = tk.StringVar(self, value=0)\n self.distance_val = tk.StringVar(self, value=50000)\n\n self.keyword_text = tk.StringVar(self, 'restaurants')\n self.location_text = tk.StringVar(self)\n self.output = tk.StringVar(self)\n self.folder_path_text = tk.StringVar(\n self, value=os.path.join(os.path.join(os.path.expanduser('~'), 'Desktop')))\n self.chosen_spider = tk.StringVar(self)\n self.chosen_spider.set('Select')\n self.execute_thread = None\n self.feed_options = ['json', 'csv']\n self.feed_text = tk.StringVar(self, value=self.feed_options[1])\n\n self.columnconfigure(0, weight=4)\n self.columnconfigure(1, weight=1)\n self.__create_widgets()\n\n def __create_widgets(self):\n input_frame = ttk.Frame(self)\n input_frame.columnconfigure(0, weight=1)\n input_frame.columnconfigure(1, weight=3)\n ttk.Label(input_frame, text='Find:').grid(column=0, row=0, sticky=tk.W)\n keyword = ttk.Entry(input_frame, width=30, textvariable=self.keyword_text)\n keyword.focus()\n keyword.grid(column=1, row=0, sticky=tk.W)\n ttk.Label(input_frame, text='Location:').grid(column=0, row=1, sticky=tk.W)\n location = ttk.Entry(input_frame, width=30, textvariable=self.location_text)\n location.grid(column=1, row=1, sticky=tk.W)\n #\n # ttk.Label(input_frame, 
text='Distance:').grid(column=0, row=2, sticky=tk.W)\n # self.distance = ttk.Entry(input_frame, width=30, textvariable=self.distance_val)\n # self.distance.grid(column=1, row=2, sticky=tk.W)\n\n lbl_frame = ttk.LabelFrame(input_frame, text='Feed Type:')\n lbl_frame.grid(column=0, row=4, sticky='W')\n\n ttk.Combobox(lbl_frame, textvariable=self.feed_text, values=self.feed_options, width=10).grid(column=0, row=0,\n sticky=tk.W)\n input_frame.grid(column=0, row=0)\n for widget in input_frame.winfo_children():\n widget.grid(padx=0, pady=5)\n\n button_frame = ttk.Frame(self)\n button_frame.columnconfigure(0, weight=1)\n\n spiders = [s for s in self.get_spiders()]\n\n ttk.Combobox(button_frame, textvariable=self.chosen_spider, values=spiders, width=10).grid(column=0, row=0,\n sticky=tk.W)\n ttk.Button(button_frame, text='Start', command=lambda: self.execute_threading(None)).grid(column=0, row=1)\n ttk.Button(button_frame, text='Save To', command=self.browse_btn).grid(column=0, row=2)\n ttk.Label(button_frame, text='save_path', textvariable=self.folder_path_text, wraplength=50).grid(column=0,\n row=3)\n button_frame.grid(column=1, row=0, sticky='NW')\n for widget in button_frame.winfo_children():\n widget.grid(padx=0, pady=5)\n for widget in self.winfo_children():\n widget.grid(padx=0, pady=3)\n\n # def update_distance(self,event):\n # self.distance_update.set(f'{round(self.distance.get())} Km')\n # self.distance_val.set(round()\n def browse_btn(self):\n folder_path = filedialog.askdirectory()\n self.folder_path_text.set(folder_path)\n\n def choose_feed(self, value):\n self.feed_text.set(value)\n\n def get_spiders(self):\n return [s for s in SpiderLoader.from_settings(project.get_project_settings()).list()]\n\n def execute_spider(self):\n # custom_feeds = ['title', 'author', 'date', 'article', 'link']\n if self.keyword_text.get() == '':\n messagebox.showerror(\n 'Error', 'Keyword should not be None')\n self.execute_thread = None\n return\n\n if self.feed_text.get() not in self.feed_options:\n messagebox.showerror(\n 'Error', 'Please choose an output Feed')\n self.execute_thread = None\n return\n\n ran = datetime.datetime.timestamp(datetime.datetime.now())\n try:\n output_url = f'file:///{self.folder_path_text.get()}/YP_file{str(ran).replace(\".\", \"\")}.{self.feed_text.get()}'\n\n setting = project.get_project_settings()\n\n setting.set('FEEDS', {output_url: {'format': self.feed_text.get()}})\n # setting.set('FEED_EXPORT_FIELDS', custom_feeds)\n\n if self.chosen_spider.get().startswith('yp'):\n custom_settings = {\n 'SELENIUM_DRIVER_NAME': 'chrome',\n 'SELENIUM_DRIVER_EXECUTABLE_PATH': ChromeDriverManager().install(),\n 'SELENIUM_DRIVER_ARGUMENTS': ['--incognito',f'user-agent={user_agents()}',\"start-maximized\"],\n 'DOWNLOADER_MIDDLEWARES': {\n\n 'scrapyselenium.SeleniumMiddleware': 800\n\n },\n\n }\n setting.update(custom_settings)\n runner = CrawlerRunner(setting)\n\n configure_logging()\n\n d = runner.crawl(self.chosen_spider.get(), kword=self.keyword_text.get(), location=self.location_text.get())\n #\n d.addBoth(lambda _: reactor.stop())\n reactor.run(installSignalHandlers=False)\n messagebox.showinfo('success', 'The data has been scraped.')\n\n except CloseSpider as err:\n messagebox.showerror('Stopped', err.reason)\n self.execute_btn['state'] = 'enable'\n\n def execute_threading(self, event):\n self.execute_thread = threading.Thread(\n target=self.execute_spider, daemon=True)\n if self.execute_thread is not None:\n try:\n if not self.execute_thread.is_alive():\n 
self.execute_thread.start()\n self.after(10, self.check_thread)\n except AttributeError:\n pass\n\n def check_thread(self):\n if self.execute_thread.is_alive():\n self.after(10, self.check_thread)\n\n\nif __name__ == '__main__':\n root = App()\n root.attributes('-topmost', 1)\n root.mainloop()\n", "repo_name": "pyfuncode/yellow_pages", "sub_path": "yps.py", "file_name": "yps.py", "file_ext": "py", "file_size_in_byte": 7711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tkinter.END", "line_number": 21, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 46, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 60, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 63, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Entry", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 64, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 67, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Entry", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 68, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 75, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 78, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Frame", "line_number": 84, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 84, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 89, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 89, "usage_type": "name"}, {"api_name": "tkinter.W", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Button", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 
91, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 92, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 93, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 105, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 105, "usage_type": "name"}, {"api_name": "scrapy.spiderloader.SpiderLoader.from_settings", "line_number": 112, "usage_type": "call"}, {"api_name": "scrapy.spiderloader.SpiderLoader", "line_number": 112, "usage_type": "name"}, {"api_name": "scrapy.utils.project.get_project_settings", "line_number": 112, "usage_type": "call"}, {"api_name": "scrapy.utils.project", "line_number": 112, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 117, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 123, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 123, "usage_type": "name"}, {"api_name": "datetime.datetime.timestamp", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 128, "usage_type": "call"}, {"api_name": "scrapy.utils.project.get_project_settings", "line_number": 132, "usage_type": "call"}, {"api_name": "scrapy.utils.project", "line_number": 132, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 140, "usage_type": "call"}, {"api_name": "scrapy.crawler.CrawlerRunner", "line_number": 150, "usage_type": "call"}, {"api_name": "scrapy.utils.log.configure_logging", "line_number": 152, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.stop", "line_number": 156, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 156, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 157, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 157, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 158, "usage_type": "name"}, {"api_name": "scrapy.exceptions.CloseSpider", "line_number": 160, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 161, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 161, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "34936559513", "text": "from collections import Counter\r\nimport math\r\n\r\n# Creamos un objeto \"Info\" que nos devolverá la función \"codificar()\". Este contendrá toda\r\n# la info de la codificación: el árbol de huffman, el código y una tabla. 
The table stores, for \r\n# each letter, its encoding and its frequency: \"tabla[letra] = [frecuencia, codificacion]\"\r\nclass Info:\r\n    def __init__(self):\r\n        self.tabla = dict()\r\n        self.arbol = None\r\n        self.codigo = None\r\n        self.texto = None\r\n\r\n# build the huffman tree\r\ndef crear_arbol(texto, info):\r\n    # count how many times each letter appears: list of tuples '(letter, number of occurrences)'\r\n    c = Counter(texto)\r\n    N = len(texto)\r\n    for k,v in c.items():\r\n        info.tabla[k] = [v/N, \"\"]\r\n    # sort them from highest to lowest (number of occurrences)\r\n    tuplas = sorted(c.items(), key=lambda x : x[1])\r\n    tuplas.reverse()\r\n    # build the tree recursively\r\n    N = len(tuplas)\r\n    for k in range(N-1):\r\n        # take the two trees with the lowest weight: a1 and a2\r\n        a1, p1 = tuplas[-1]\r\n        a2, p2 = tuplas[-2]\r\n        del tuplas[-1]\r\n        del tuplas[-1]\r\n        # merge a1 and a2 \r\n        a = (a1, a2)\r\n        peso = p1 + p2\r\n        union = [a,peso]\r\n        # store the merged tree\r\n        if k == N-2:\r\n            tuplas.append(union)\r\n        else:\r\n            length = len(tuplas)\r\n            for i in range(length):\r\n                if tuplas[i][1] <= peso:\r\n                    tuplas.insert(i, union)\r\n                    break\r\n                elif i == length-1:\r\n                    tuplas.append(union)\r\n    # 'tuplas' finally holds a single tree, the one we are looking for\r\n    info.arbol = tuplas[0][0]\r\n\r\n# build a dictionary with the (0 / 1) codes of each letter\r\ndef crear_tabla_codigos(arbol, prefijo, info):\r\n    # check whether it is a leaf\r\n    if type(arbol) == str:\r\n        info.tabla[arbol][1] = prefijo\r\n    # append \"0\" or \"1\" to the prefix depending on whether we go left or right in the tree\r\n    else:\r\n        crear_tabla_codigos(arbol[0], prefijo + \"0\", info)\r\n        crear_tabla_codigos(arbol[1], prefijo + \"1\", info)\r\n\r\n# encoding the text with a given code table means going letter by letter, replacing each one with its code\r\ndef codificar_con_tabla(texto, tabla):\r\n    codigo = \"\"\r\n    for c in texto:\r\n        codigo += tabla[c][1]\r\n    return codigo\r\n\r\n# encode a message from scratch\r\ndef codificar(texto):\r\n    info = Info()\r\n    info.texto = texto\r\n    crear_arbol(texto, info)\r\n    crear_tabla_codigos(info.arbol, \"\", info)\r\n    info.codigo = codificar_con_tabla(texto, info.tabla)\r\n    return info\r\n    \r\n# decode a message\r\ndef descodificar(codigo, arbol):\r\n    texto = \"\"\r\n    actual = arbol\r\n    for c in codigo:\r\n        if c == \"0\":\r\n            actual = actual[0]\r\n        else:\r\n            actual = actual[1]\r\n        if type(actual) == str:\r\n            texto += actual\r\n            actual = arbol\r\n    return texto\r\n\r\n\r\n# -------------------------------------------------------------------------\r\n# EXERCISES SECTION\r\n# -------------------------------------------------------------------------\r\n\r\nwith open('ingles.txt', 'r',encoding=\"utf8\") as file:\r\n    en = file.read()\r\n    \r\nwith open('español.txt', 'r',encoding=\"utf8\") as file:\r\n    es = file.read()\r\n\r\n# Encode and store the information in tuples (Spanish, English)\r\ninfo = (codificar(es), codificar(en))\r\nnombre = (\"Español\", \"Ingles\")\r\ntexto = (es, en)\r\n\r\n# Entropy\r\nH = lambda tabla : -sum([v[0]*math.log2(v[0]) for k,v in tabla.items()])\r\n# Average length\r\nL = lambda tabla : sum([tabla[letra][0] * len(tabla[letra][1]) for letra in tabla.keys()])\r\n# Error\r\nE = lambda info : math.sqrt( (1.0/len(info.texto))**2 * sum([ (math.log2(v[0]) + 1.0/math.log(2))**2 for v in info.tabla.values()]) )\r\n\r\n\r\ndef pregunta1():\r\n    for i in range(2):\r\n        print(\"\\n1) Idioma:\", nombre[i])\r\n        longitud = L(info[i].tabla)\r\n        print(\"2) Longitud media: 
{:.4f}\".format(longitud))\r\n print(\"3.0) Error:\")\r\n print(\" E(C) =\", E(info[i])) # => redondeamos a 2 decimales\r\n print(\"3) Comprobación del primer teorema de Shannon:\")\r\n print(\" H(C) = {:.2f}\".format(H(info[i].tabla)), \", L(C) = {:.2f}\".format(longitud), \" => H(C) <= L(C) < H(C) + 1\")\r\n print(\"4) Codigos del alfabeto:\")\r\n for letra in info[i].tabla.keys():\r\n l = letra if letra != \"\\n\" else \"\\\\n\"\r\n print(f\"\\t[{l}] ->\", info[i].tabla[letra][1])\r\n\r\ndef pregunta2():\r\n X = \"dimension\"\r\n for i in range(2):\r\n cod = codificar_con_tabla(X, info[i].tabla)\r\n print(f\"\\n[{nombre[i]}] \\nCodificacion de 'dimension':\", cod)\r\n n = len(cod)\r\n N = 8 # log_2(256) = 8 -> siendo 256, el nº de carcateres que se usan en el código ascii\r\n print(\"Longitud:\", n, f\"\\t(Longitud ascii: {N*len(X)})\")\r\n\r\ndef pregunta3():\r\n print(f\"\\n[{nombre[1]}]\")\r\n palabra = \"isomorphism\"\r\n cod = codificar_con_tabla(palabra, info[1].tabla)\r\n decod = descodificar(cod, info[1].arbol)\r\n print(f\"Decodificacion de '{cod}':\", decod)\r\n\r\ndef main():\r\n print(\"\\nPREGUNTA 1:\")\r\n pregunta1()\r\n print(\"\\nPREGUNTA 2:\")\r\n pregunta2()\r\n print(\"\\nPREGUNTA 3:\")\r\n pregunta3()\r\n print()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "JavierAM01/Arbol-de-Huffman-y-Primer-Teorema-de-Shannon", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5323, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.Counter", "line_number": 17, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 107, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 111, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 111, "usage_type": "call"}, {"api_name": "math.log", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "3057525472", "text": "from botocore.exceptions import ClientError\nfrom services.bulk_upload_service import BulkUploadService\nfrom utils.audit_logging_setup import LoggingService\nfrom utils.decorators.override_error_check import override_error_check\nfrom utils.decorators.set_audit_arg import set_request_context_for_logging\nfrom utils.exceptions import InvalidMessageException, PdsTooManyRequestsException\nfrom utils.lloyd_george_validator import LGInvalidFilesException\n\nlogger = LoggingService(__name__)\n\n\n@set_request_context_for_logging\n@override_error_check\ndef lambda_handler(event, _context):\n logger.info(\"Received event. Starting bulk upload process\")\n bulk_upload_service = BulkUploadService()\n\n if \"Records\" not in event:\n logger.error(f\"No sqs messages found in event: {event}. 
Will ignore this event\")\n return\n\n for index, message in enumerate(event[\"Records\"], start=1):\n try:\n logger.info(f\"Processing message {index} of {len(event['Records'])}\")\n bulk_upload_service.handle_sqs_message(message)\n except PdsTooManyRequestsException as error:\n logger.error(error)\n logger.info(\"Cannot process for now due to PDS rate limit reached.\")\n logger.info(\n \"All remaining messages in this batch will be returned to sqs queue to retry later.\"\n )\n\n all_unprocessed_message = event[\"Records\"][index - 1 :]\n for unprocessed_message in all_unprocessed_message:\n bulk_upload_service.put_sqs_message_back_to_queue(unprocessed_message)\n return\n except (\n ClientError,\n InvalidMessageException,\n LGInvalidFilesException,\n KeyError,\n TypeError,\n AttributeError,\n ) as error:\n logger.info(f\"Fail to process current message due to error: {error}\")\n logger.info(\"Continue on next message\")\n logger.info(f\"Finished processing all {len(event['Records'])} messages\")\n", "repo_name": "nhsconnect/national-document-repository", "sub_path": "lambdas/handlers/bulk_upload_handler.py", "file_name": "bulk_upload_handler.py", "file_ext": "py", "file_size_in_byte": 2009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils.audit_logging_setup.LoggingService", "line_number": 9, "usage_type": "call"}, {"api_name": "services.bulk_upload_service.BulkUploadService", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.exceptions.PdsTooManyRequestsException", "line_number": 26, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.exceptions.InvalidMessageException", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.lloyd_george_validator.LGInvalidFilesException", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.decorators.set_audit_arg.set_request_context_for_logging", "line_number": 12, "usage_type": "name"}, {"api_name": "utils.decorators.override_error_check.override_error_check", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "9222131344", "text": "from datetime import datetime\nfrom typing import Optional, Callable, Dict, Union\n\nfrom lxml import etree\n\nfrom swim_aim.xml import MappedValueType\n\n__author__ = \"EUROCONTROL (SWIM)\"\n\n\nclass XMLMapperField:\n\n def __init__(self,\n xpath: str,\n post_map: Optional[Callable] = None,\n namespaces: Optional[Dict[str, str]] = None,\n strict: Optional[bool] = True) -> None:\n \"\"\"\n :param xpath: the xpath of the element that this field represents (maps)\n :param post_map: a callable to be called the respective element has been mapped\n :param namespaces: the namespaces used in the XML file where the element comes from\n :param strict: to be used for strict validation of the mapped value\n \"\"\"\n if not xpath.startswith('./'):\n raise ValueError('Invalid xpath')\n\n self.xpath = xpath\n self.namespaces = namespaces\n self.strict = strict\n self._post_map = post_map\n self._xpath_tree = xpath.split('/')\n self._xpath_leaf = self._xpath_tree[-1]\n\n def _get_value_from_element(self, element: etree.Element) -> MappedValueType:\n \"\"\"\n Retrieves the value of the provided element based on the xpath\n\n :param element:\n :return:\n \"\"\"\n # the xpath is deeper than one element\n if len(self._xpath_tree) > 1:\n element = element.find(self.xpath, self.namespaces)\n\n return element.text if element is not None else 
None\n\n def _get_value_from_attribute(self, element: etree.Element) -> MappedValueType:\n \"\"\"\n Retrieves the value of the attribute specified in the xpath from the provided element\n\n :param element:\n :return:\n \"\"\"\n # discard @ from the beginning of the attribute name\n attribute_name = self._xpath_leaf[1:]\n\n # cleanup attribute_name in case it contains namespace code .i.e xlink:href\n if ':' in attribute_name:\n ns_code, attr_name = attribute_name.split(':')\n namespace = self.namespaces[ns_code]\n attribute_name = f'{{{namespace}}}{attr_name}'\n\n # the xpath is deeper than one element (plus the attribute name)\n if len(self._xpath_tree) > 2:\n xpath_path = \"/\".join(self._xpath_tree[:-1])\n element = element.find(xpath_path, self.namespaces)\n\n return element.get(attribute_name) if element is not None else None\n\n def _get_value(self, element):\n func = self._get_value_from_attribute if self._xpath_leaf.startswith('@') else self._get_value_from_element\n\n return func(element)\n\n def from_xml(self, element: etree.Element) -> MappedValueType:\n \"\"\"\n The main function to be called in order to retrieve the value of an XML element or attribute\n :param element:\n :return:\n \"\"\"\n value = self._get_value(element)\n\n if self._post_map and value is not None:\n value = self._post_map(value)\n\n return value\n\n\nclass IntegerXMLMapperField(XMLMapperField):\n\n def _get_value(self, element: etree.Element) -> Union[int, str, None]:\n \"\"\"\n Overrides the parent method by converting the XML value to integer.\n :param element:\n :return:\n \"\"\"\n value: str = super()._get_value(element)\n\n try:\n return int(value)\n except (ValueError, TypeError):\n if self.strict:\n raise\n\n return value\n\n\nclass FloatXMLMapperField(XMLMapperField):\n\n def _get_value(self, element: etree.Element) -> Union[float, str, None]:\n \"\"\"\n Overrides the parent method by converting the XML value to float.\n\n :param element:\n :return:\n \"\"\"\n value: str = super()._get_value(element)\n\n try:\n return float(value)\n except (ValueError, TypeError):\n if self.strict:\n raise\n\n return value\n\n\nclass DatetimeXMLMapperField(XMLMapperField):\n\n def __init__(self, xpath: str, str_format: str = '%Y-%m-%dT%H:%M:%S', **kwargs) -> None:\n \"\"\"\n\n :param xpath:\n :param str_format:\n :param kwargs:\n \"\"\"\n super().__init__(xpath, **kwargs)\n self.str_format = str_format\n\n def _get_value(self, element: etree.Element) -> Union[datetime, None]:\n \"\"\"\n Overrides the parent method by converting the XML value to datetime.\n\n :param element:\n :return:\n \"\"\"\n value: str = super()._get_value(element)\n\n return datetime.strptime(value, self.str_format) if value else None\n", "repo_name": "eurocontrol-swim/swim-aim", "sub_path": "swim_aim/xml/mapper_fields.py", "file_name": "mapper_fields.py", "file_ext": "py", "file_size_in_byte": 4658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 34, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 34, "usage_type": "name"}, {"api_name": 
"swim_aim.xml.MappedValueType", "line_number": 34, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 47, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 47, "usage_type": "name"}, {"api_name": "swim_aim.xml.MappedValueType", "line_number": 47, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 75, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 75, "usage_type": "name"}, {"api_name": "swim_aim.xml.MappedValueType", "line_number": 75, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 91, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 91, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 110, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 110, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 140, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 140, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 140, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "4615737618", "text": "import os\n\nfrom flask import current_app as app\n\n\ndef load_list(project, list_type='eid', prefix='', module='providerAnalysis'):\n \"\"\"\n loads a list of strings from a txt file\n :param project: the project id\n :param list_type: the type of list, e.g. eid, isbn, issn etc.\n :param prefix: the prefix for the list to be saved (fixed, missed, etc.)\n :param module: the module for which the list is retrieved\n :return: the list of strings\n \"\"\"\n with app.app_context():\n location = app.config.get(\"LIBINTEL_DATA_DIR\")\n # path to the file\n path_to_file = location + '/' + module + '/' + project + '/' + prefix + '_' + list_type + '_list.txt'\n if not os.path.exists(path_to_file):\n return []\n with open(path_to_file) as f:\n eids = f.readlines()\n f.close()\n # remove whitespace characters like `\\n` at the end of each line\n return [x.strip() for x in eids]\n\n\ndef save_list(project, item_list, list_type='eid', prefix='', module='providerAnalysis'):\n \"\"\"\n saves a list of strings to a txt file\n :param project: the project id\n :param list_type: the type of list, e.g. 
eid, isbn, issn etc.\n :param prefix: the prefix for the list to be saved (fixed, missed, etc.)\n :param module: the module for which the list is retrieved\n :param item_list: the list to be saved\n :return: a boolean indicating whether the saving was successful\n \"\"\"\n with app.app_context():\n location = app.config.get(\"LIBINTEL_DATA_DIR\")\n folder = location + '/' + module + '/' + project + '/'\n if not os.path.exists(folder):\n os.makedirs(folder)\n try:\n with open(folder + prefix + '_' + list_type + '_list.txt', 'w') as list_file:\n for item in item_list:\n list_file.write(item + '\\n')\n list_file.close()\n return True\n except IOError:\n return False\n", "repo_name": "ETspielberg/libintel_scripts", "sub_path": "services/list_service.py", "file_name": "list_service.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.current_app.app_context", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.current_app.config.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.current_app.app_context", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.current_app.config.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "10621022804", "text": "from image import Image\nimport threading\nfrom role import Role\nfrom villager_listener import VillagerListener\nfrom constant import Constant\nfrom debug_print import *\nfrom skill import Skill\nimport json\nimport pygame\nfrom land import Land\nfrom attack import AttackAnimation\nfrom house import House\nfrom item import Item\nfrom constant_image import ConstantImage\nimport random\n\n\nclass Villager(Image, threading.Thread):\n\n lock = threading.RLock()\n # for testing purpose only want to create one leader\n leader_taken = False\n \n\n def __init__(self, image, position, villager_id, font, listener, current_leader, skill_images):\n self.role = Role.FOLLOWER\n self.listener = listener\n self.current_leader = current_leader\n self.leadership_term = 0\n # render shouting\n self.message_count = 1\n # for testing to only create one leader\n self.skills = []\n self.skill_adding_list = []\n self.max_health = Constant.VILLAGER_MAX_HP\n self.current_health = self.max_health\n self.current_message = \"\"\n self.message_countdown = 0\n self.learned_skill_names = []\n self.turning_learned_skills_list = []\n self.dead = False\n self.dead_message_sent = False\n width, height = image.get_rect().size\n center_x, center_y = position\n super().__init__(image, center_x, center_y, height, width)\n self.villager_id = villager_id\n self.font = font\n self.attacked = 
False\n        self.item = []\n        self.attack = None\n\n\n        self.land = Land(self, Constant.LAND_SIZE)\n\n        self.house = None\n        self.build_house_countdown = Constant.BUILD_HOUSE_COUNT_DOWN\n\n        threading.Thread.__init__(self)\n\n        self.attack_probability = 0.5\n        self.attack_display_count_down = Constant.ATTACK_DISPLAY_COUNT_DOWN\n        self.attack_display_count_down_const = Constant.ATTACK_DISPLAY_COUNT_DOWN\n        self.attacked = False\n        self.attack_power = 1\n\n        self.skill_images = skill_images\n\n\n    def pickTile(self, tile):\n        \"\"\"\n        \n        Check which tile is clicked by the mouse, and apply its benefits to the Villager\n        \n        :param tile: Tile \n        :return: \n        \"\"\"\n        if tile.mature:\n            if tile.tile_type == Constant.TILE_TYPE_PLANT:\n                self.current_health_up_with_amount(Constant.PLANT_HEALTH_INCREASE)\n            elif tile.tile_type == Constant.TILE_TYPE_ANIMAL:\n                self.current_health_up_with_amount(Constant.ANIMAL_HEALTH_INCREASE)\n            tile.un_mature()\n\n    def addHouse(self):\n        \"\"\"\n        \n        Add a house Object to the Villager\n        \n        \"\"\"\n        self.house = House(self.x, self.y)\n\n    # armour\n    def addItemToLeftHand(self, image, item_name, image_scale):\n        \"\"\"\n        \n        Adding an item to the left hand side of the villager\n        \n        :param image: Image \n        :param item_name: str\n        :param image_scale: int\n        :return: \n        \"\"\"\n        width, height = image.get_rect().size\n        temp_item_center_x = self.x + width * image_scale // 2\n        temp_item_center_y = self.y + width * image_scale\n        temp_item = Item(image, temp_item_center_x, temp_item_center_y, item_name, image_scale)\n        self.item.append(temp_item)\n\n    # sword\n    def addItemToRightHand(self, image, item_name, image_scale):\n        \"\"\"\n\n        Adding an item to the right hand side of the villager\n\n        :param image: Image \n        :param item_name: str\n        :param image_scale: int\n        :return: \n        \"\"\"\n        width, height = image.get_rect().size\n        temp_item_center_x = self.x - width * image_scale\n        temp_item_center_y = self.y\n        temp_item = Item(image, temp_item_center_x, temp_item_center_y, item_name, image_scale)\n        self.item.append(temp_item)\n\n    def being_attacked(self, hp_decrement):\n        \"\"\"\n        \n        if the villager is attacked, set the hp down\n        \n        :param hp_decrement: int\n        \n        \"\"\"\n        self.current_health_down_with_amount(hp_decrement)\n\n\n\n    def add_skill(self, skill_name):\n        \"\"\"\n        \n        Add a skill Object to the player's skill list\n        \n        :param skill_name: str \n        \n        \"\"\"\n        skill_num = len(self.skills)\n        image = self.skill_images[skill_name]\n\n        # each row renders four skills, then go up\n        one_skill = Skill(skill_name, image, self.x - self.width/2 - ((image.get_rect().size)[0] * Constant.SKILL_IMAGE_SCALE_VILLAGER) / 2, (self.y + self.height/2) - (int (skill_num) * int((image.get_rect().size)[1] * Constant.SKILL_IMAGE_SCALE_VILLAGER)), Constant.SKILL_IMAGE_SCALE_VILLAGER, False)\n        self.skills.append(one_skill)\n\n\n    def run(self):\n        while (not self.dead) and (not self.listener.stopped):\n            # consuming the parsed JSON message from the queue\n            request = self.listener.request_queue.get()\n\n            request_type = request[Constant.MESSAGE_TYPE]\n            # according to the type of the request, apply the corresponding method to the villager\n            if request_type == Constant.VILLAGER_DEAD:\n                self.dead = True\n                continue\n            if request_type == Constant.APPEND and self.role == Role.LEADER:\n                if not request[Constant.NEW_ENTRIES]:\n                    self.reclaim_authority()\n            elif request_type == Constant.LEADERSHIP:\n                self.set_leadership(request)\n            elif request_type == Constant.REQUEST_VOTE:\n                self.set_candidate(request)\n            elif request_type == Constant.REQUEST_VOTE_REPLY:\n                self.vote(request)\n            elif 
request_type == Constant.REQUEST_COMMAND_ACK and self.role == Role.LEADER:\n                self.leader_receive_learn(request)\n            elif request_type == Constant.APPEND_REPLY:\n                self.learning_skill(request)\n            elif request_type == Constant.COMMIT_INDEX:\n                self.learned_skill(request)\n            if self.current_health == 0:\n                debug_print(\"Villager\" + str(self.villager_id) + \" is dead\")\n                self.dead = True\n\n        if self.listener.stopped:\n            print(str(self.villager_id) + \"'s listener is dead\")\n            self.dead = True\n        if self.dead:\n            # if dead, send the JSON to the corresponding remote Raft peer to ask it to terminate\n            data = {Constant.MESSAGE_TYPE: \"villager_killed\", Constant.PEER_ID: self.listener.peer_id}\n            try:\n                self.listener.socket.sendall(str.encode(json.dumps(data) + \"\\n\"))\n                print(\"villager killed message sent\")\n            except ConnectionResetError:\n                print(\"connection dead\")\n                print(\"villager killed message sent\")\n            self.listener.stop_listener()\n            self.dead_message_sent = True\n\n\n    def reclaim_authority(self):\n        \"\"\"\n        Display a dialogue box to show the string 'I'm the leader'\n        \n        \"\"\"\n        self.set_message(Constant.AUTHORITY_MESSAGE)\n\n    def set_leadership(self, request):\n        \"\"\"\n        \n        try to set the leader using this leadership request dictionary\n        \n        :param request: dict\n        \"\"\"\n        term = request[Constant.SENDER_TERM]\n        # if there is still a leader and the term number of the JSON message is smaller than\n        # the current term, ignore this outdated leader message\n        if self.current_leader and self.current_leader.leadership_term > term:\n            return\n        self.role = Role.LEADER\n        self.leadership_term = term\n        self.set_message(Constant.NEW_LEADER_MESSAGE)\n\n    def set_candidate(self, request):\n        \"\"\"\n        \n        set the villager to candidate using this request_vote request\n        \n        :param request: dict\n        \"\"\"\n        term = request[Constant.SENDER_TERM]\n        # abort the outdated request_vote message\n        if self.current_leader and self.current_leader.leadership_term > term:\n            return\n        self.role = Role.CANDIDATE\n        self.set_message(Constant.CANDIDATE_MESSAGE)\n\n    def vote(self, request):\n        term = request[Constant.SENDER_TERM]\n        if self.current_leader and self.current_leader.leadership_term > term:\n            return\n        vote_for = request[Constant.VOTE_PEER_ID][4:]\n        debug_print(type(request[Constant.VOTE_GRANTED]))\n        if request[Constant.VOTE_GRANTED] == True:\n            self.set_message(Constant.VOTE_MESSAGE.format(vote_for))\n\n    def leader_receive_learn(self, request):\n        skill_name = request[Constant.REQUEST_COMMAND_LIST][0]\n        index = int(request[Constant.INDEX])\n        if index == len(self.skills):\n            self.add_skill(skill_name)\n            while self.skill_adding_list:\n                length = len(self.skills)\n                if self.skill_adding_list[0][0] == length:\n                    skill = self.skill_adding_list.pop(0)\n                    self.add_skill(skill[1])\n                else:\n                    break\n        elif index > len(self.skills):\n            self.skill_adding_list.append((index, skill_name))\n            self.skill_adding_list.sort()\n\n    def learning_skill(self, request):\n        result = request[Constant.APPEND_RESULT]\n        if result and self.current_leader:\n            index = int(request[Constant.LAST_LOG_INDEX])\n            if (index >= len(self.skills)) and (index < len(self.current_leader.skills)):\n                for i in range(len(self.skills), index + 1):\n                    self.add_skill(self.current_leader.skills[i].skill_name)\n\n    def learned_skill(self, request):\n        debug_print(\"in learned_skill\")\n        if not request:\n            while self.turning_learned_skills_list and self.turning_learned_skills_list[0][0] == len(\n                    self.learned_skill_names):\n\n                skill = self.turning_learned_skills_list.pop(0)\n                self.learned_skill(skill[1])\n            return\n        index = 
int(request[Constant.INDEX])\n debug_print(\"index is\" + str(index))\n debug_print(\"skills: \")\n debug_print(self.skills)\n\n if (index < len(self.skills)) and (index == len(self.learned_skill_names)):\n skill_name = self.skills[index].skill_name\n debug_print(\"skill name: \" + skill_name)\n else:\n while self.turning_learned_skills_list and self.turning_learned_skills_list[0][0] == len(self.learned_skill_names):\n skill = self.turning_learned_skills_list.pop(0)\n self.learned_skill(skill[1])\n self.turning_learned_skills_list.append((index, request))\n self.turning_learned_skills_list.sort()\n debug_print(\"returned in else\")\n return\n if skill_name not in Constant.SKILLS:\n debug_print(\"not in skills\")\n return\n if skill_name == Constant.ARMOUR:\n self.addItemToLeftHand(ConstantImage.ARMOUR_IMAGE_SPRITE,Constant.ITEM_NAME_ARMOUR ,Constant.ARMOUR_IMAGE_SCLAE)\n elif skill_name == Constant.SWORD:\n self.addItemToRightHand(ConstantImage.SWORD_IMAGE_SPRITE, Constant.ITEM_NAME_SWORD, Constant.SWORD_IMAGE_SCALE)\n elif skill_name == Constant.ANIMAL:\n for tile in self.land.tiles:\n if tile.tile_type == Constant.TILE_TYPE_ANIMAL:\n tile.display_plant_or_animal = True\n elif skill_name == Constant.PLANT:\n for tile in self.land.tiles:\n if tile.tile_type == Constant.TILE_TYPE_PLANT:\n tile.display_plant_or_animal = True\n elif skill_name == Constant.HOUSE:\n self.addHouse()\n self.skills[index].greyed = False\n self.skills[index].applied = True\n debug_print(\"set skill greyed false\")\n self.learned_skill_names.append(skill_name)\n\n def set_message(self, message):\n self.current_message = message\n self.message_countdown = Constant.MESSAGE_TIME\n\n\n def max_health_up(self):\n self.max_health += 1\n\n def max_health_down(self):\n self.max_health -= 1\n\n def current_health_up(self):\n self.current_health += 1\n\n def current_health_down(self):\n self.current_health -= 1\n\n\n def current_health_up_with_amount(self, hp_increment):\n self.current_health += hp_increment\n if self.current_health > self.max_health:\n self.current_health = self.max_health\n\n\n def attack_monster_or_not(self, monster):\n\n if (Constant.SWORD not in self.learned_skill_names) or self.attacked or \\\n (Constant.SWORD in self.learned_skill_names):\n return\n\n self.attacked = random.random() >= self.attack_probability\n\n if self.attacked and self.attack_power > 0 :\n monster.set_attack(self.attack_power)\n self.attack = AttackAnimation(ConstantImage.VILLAGER_ATTACK_IMAGE_SPRITE, monster.x, monster.y, Constant.VILLAGER_ATTACK_IMAGE_SCALE)\n self.attack_display_count_down = self.attack_display_count_down_const\n\n else:\n self.attacked = False\n\n\n def current_health_down_with_amount(self, hp_decrement):\n\n if self.house is not None and self.house.display_house:\n self.house.house_durability_decrement_with_amount(hp_decrement)\n if self.house.current_durability <= 0:\n self.house.display_house = False\n return\n\n if Constant.ARMOUR in self.learned_skill_names:\n hp_decrement -= Constant.ITEM_ARMOUR_DEFEND_POWER_ADD\n\n if hp_decrement >= self.current_health:\n self.current_health = 0\n self.dead = True\n return\n self.current_health -= hp_decrement\n\n def build_house(self):\n if self.house:\n if not self.house.display_house:\n if self.build_house_countdown == Constant.BUILD_HOUSE_COUNT_DOWN:\n self.set_message(Constant.BUILD_HOUSE_MESSAGE)\n self.build_house_countdown -= 1\n elif self.build_house_countdown > 0:\n self.build_house_countdown -= 1\n else:\n self.house.display_house = True\n 
self.build_house_countdown = Constant.BUILD_HOUSE_COUNT_DOWN\n\n def render_attack(self, screen):\n if self.attacked and self.attack_display_count_down != 0:\n self.attack.render(screen)\n self.attack_display_count_down -= 1\n if self.attack_display_count_down <= 0:\n self.attacked = False\n\n def render(self, screen):\n\n if self.house and self.house.display_house:\n self.house.render(screen)\n\n super().render(screen)\n\n for one_skill in self.skills:\n one_skill.render(screen)\n\n self.land.render(screen)\n name = self.font.render(\"Villager \" + str(self.villager_id), 1, Constant.BLACK)\n screen.blit(name, (self.x - name.get_width() // 2, self.y + self.height // 2))\n\n if self.role != Role.FOLLOWER:\n if self.role == Role.LEADER:\n title = \"Leader\"\n role = self.font.render(title, 1, Constant.BLACK)\n screen.blit(role, (self.x - role.get_width() // 2, self.y + self.height // 2 + role.get_height() + 2))\n\n if self.message_countdown > 0:\n message = self.font.render(self.current_message, 1, Constant.BLACK)\n screen.blit(message, (self.x - message.get_width() // 2, self.y - self.height // 2 - message.get_height() - 2))\n self.message_countdown -= 1\n\n pygame.draw.rect(screen, Constant.GRAY, pygame.Rect((self.x - self.width // 2,\n self.y - self.height // 2),\n (self.width, Constant.HEAL_BAR_HEIGHT)))\n pygame.draw.rect(screen, Constant.RED, pygame.Rect((self.x - self.width // 2,\n self.y - self.height // 2),\n (self.width * (self.current_health / self.max_health),\n Constant.HEAL_BAR_HEIGHT)))\n\n for one_item in self.item:\n one_item.render(screen)\n\n", "repo_name": "Dawindmill/Raft-In-Python", "sub_path": "Visualization/villager.py", "file_name": "villager.py", "file_ext": "py", "file_size_in_byte": 16318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "image.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 18, "usage_type": "attribute"}, {"api_name": "threading.RLock", "line_number": 20, "usage_type": "call"}, {"api_name": "role.Role.FOLLOWER", "line_number": 26, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 26, "usage_type": "name"}, {"api_name": "constant.Constant.VILLAGER_MAX_HP", "line_number": 35, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 35, "usage_type": "name"}, {"api_name": "image.get_rect", "line_number": 43, "usage_type": "call"}, {"api_name": "land.Land", "line_number": 53, "usage_type": "call"}, {"api_name": "constant.Constant.LAND_SIZE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 53, "usage_type": "name"}, {"api_name": "constant.Constant.BUILD_HOUSE_COUNT_DOWN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 56, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 58, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 58, "usage_type": "attribute"}, {"api_name": "constant.Constant.ATTACK_DISPLAY_COUNT_DOWN", "line_number": 61, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 61, "usage_type": "name"}, {"api_name": "constant.Constant.ATTACK_DISPLAY_COUNT_DOWN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 62, "usage_type": "name"}, {"api_name": "constant.Constant.TILE_TYPE_PLANT", "line_number": 78, "usage_type": "attribute"}, {"api_name": 
"constant.Constant", "line_number": 78, "usage_type": "name"}, {"api_name": "constant.Constant.PLANT_HEALTH_INCREASE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 79, "usage_type": "name"}, {"api_name": "constant.Constant.TILE_TYPE_ANIMAL", "line_number": 80, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 80, "usage_type": "name"}, {"api_name": "constant.Constant.ANIMAL_HEALTH_INCREASE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 81, "usage_type": "name"}, {"api_name": "house.House", "line_number": 90, "usage_type": "call"}, {"api_name": "image.get_rect", "line_number": 103, "usage_type": "call"}, {"api_name": "item.Item", "line_number": 106, "usage_type": "call"}, {"api_name": "image.get_rect", "line_number": 120, "usage_type": "call"}, {"api_name": "item.Item", "line_number": 123, "usage_type": "call"}, {"api_name": "skill.Skill", "line_number": 150, "usage_type": "call"}, {"api_name": "image.get_rect", "line_number": 150, "usage_type": "call"}, {"api_name": "constant.Constant.SKILL_IMAGE_SCALE_VILLAGER", "line_number": 150, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 150, "usage_type": "name"}, {"api_name": "constant.Constant.MESSAGE_TYPE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 159, "usage_type": "name"}, {"api_name": "constant.Constant.VILLAGER_DEAD", "line_number": 161, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 161, "usage_type": "name"}, {"api_name": "constant.Constant.APPEND", "line_number": 164, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 164, "usage_type": "name"}, {"api_name": "role.Role.LEADER", "line_number": 164, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 164, "usage_type": "name"}, {"api_name": "constant.Constant.NEW_ENTRIES", "line_number": 165, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 165, "usage_type": "name"}, {"api_name": "constant.Constant.LEADERSHIP", "line_number": 167, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 167, "usage_type": "name"}, {"api_name": "constant.Constant.REQUEST_VOTE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 169, "usage_type": "name"}, {"api_name": "constant.Constant.REQUEST_VOTE_REPLY", "line_number": 171, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 171, "usage_type": "name"}, {"api_name": "constant.Constant.REQUEST_COMMAND_ACK", "line_number": 173, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 173, "usage_type": "name"}, {"api_name": "role.Role.LEADER", "line_number": 173, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 173, "usage_type": "name"}, {"api_name": "constant.Constant.APPEND_REPLY", "line_number": 175, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 175, "usage_type": "name"}, {"api_name": "constant.Constant.COMMIT_INDEX", "line_number": 177, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 177, "usage_type": "name"}, {"api_name": "constant.Constant.MESSAGE_TYPE", "line_number": 188, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 188, "usage_type": "name"}, {"api_name": "constant.Constant.PEER_ID", "line_number": 188, 
"usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "constant.Constant.AUTHORITY_MESSAGE", "line_number": 204, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 204, "usage_type": "name"}, {"api_name": "constant.Constant.SENDER_TERM", "line_number": 213, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 213, "usage_type": "name"}, {"api_name": "role.Role.LEADER", "line_number": 218, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 218, "usage_type": "name"}, {"api_name": "constant.Constant.NEW_LEADER_MESSAGE", "line_number": 220, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 220, "usage_type": "name"}, {"api_name": "constant.Constant.SENDER_TERM", "line_number": 229, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 229, "usage_type": "name"}, {"api_name": "role.Role.CANDIDATE", "line_number": 233, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 233, "usage_type": "name"}, {"api_name": "constant.Constant.CANDIDATE_MESSAGE", "line_number": 234, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 234, "usage_type": "name"}, {"api_name": "constant.Constant.SENDER_TERM", "line_number": 237, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 237, "usage_type": "name"}, {"api_name": "constant.Constant.VOTE_PEER_ID", "line_number": 240, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 240, "usage_type": "name"}, {"api_name": "constant.Constant.VOTE_GRANTED", "line_number": 241, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 241, "usage_type": "name"}, {"api_name": "constant.Constant.VOTE_GRANTED", "line_number": 242, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 242, "usage_type": "name"}, {"api_name": "constant.Constant.VOTE_MESSAGE.format", "line_number": 243, "usage_type": "call"}, {"api_name": "constant.Constant.VOTE_MESSAGE", "line_number": 243, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 243, "usage_type": "name"}, {"api_name": "constant.Constant.REQUEST_COMMAND_LIST", "line_number": 246, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 246, "usage_type": "name"}, {"api_name": "constant.Constant.INDEX", "line_number": 247, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 247, "usage_type": "name"}, {"api_name": "constant.Constant.APPEND_RESULT", "line_number": 262, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 262, "usage_type": "name"}, {"api_name": "constant.Constant.LAST_LOG_INDEX", "line_number": 264, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 264, "usage_type": "name"}, {"api_name": "constant.Constant.INDEX", "line_number": 278, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 278, "usage_type": "name"}, {"api_name": "constant.Constant.SKILLS", "line_number": 294, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 294, "usage_type": "name"}, {"api_name": "constant.Constant.ARMOUR", "line_number": 297, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 297, "usage_type": "name"}, {"api_name": "constant_image.ConstantImage.ARMOUR_IMAGE_SPRITE", "line_number": 298, "usage_type": "attribute"}, {"api_name": 
"constant_image.ConstantImage", "line_number": 298, "usage_type": "name"}, {"api_name": "constant.Constant.ITEM_NAME_ARMOUR", "line_number": 298, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 298, "usage_type": "name"}, {"api_name": "constant.Constant.ARMOUR_IMAGE_SCLAE", "line_number": 298, "usage_type": "attribute"}, {"api_name": "constant.Constant.SWORD", "line_number": 299, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 299, "usage_type": "name"}, {"api_name": "constant_image.ConstantImage.SWORD_IMAGE_SPRITE", "line_number": 300, "usage_type": "attribute"}, {"api_name": "constant_image.ConstantImage", "line_number": 300, "usage_type": "name"}, {"api_name": "constant.Constant.ITEM_NAME_SWORD", "line_number": 300, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 300, "usage_type": "name"}, {"api_name": "constant.Constant.SWORD_IMAGE_SCALE", "line_number": 300, "usage_type": "attribute"}, {"api_name": "constant.Constant.ANIMAL", "line_number": 301, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 301, "usage_type": "name"}, {"api_name": "constant.Constant.TILE_TYPE_ANIMAL", "line_number": 303, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 303, "usage_type": "name"}, {"api_name": "constant.Constant.PLANT", "line_number": 305, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 305, "usage_type": "name"}, {"api_name": "constant.Constant.TILE_TYPE_PLANT", "line_number": 307, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 307, "usage_type": "name"}, {"api_name": "constant.Constant.HOUSE", "line_number": 309, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 309, "usage_type": "name"}, {"api_name": "constant.Constant.MESSAGE_TIME", "line_number": 318, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 318, "usage_type": "name"}, {"api_name": "constant.Constant.SWORD", "line_number": 342, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 342, "usage_type": "name"}, {"api_name": "constant.Constant.SWORD", "line_number": 343, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 343, "usage_type": "name"}, {"api_name": "random.random", "line_number": 346, "usage_type": "call"}, {"api_name": "attack.AttackAnimation", "line_number": 350, "usage_type": "call"}, {"api_name": "constant_image.ConstantImage.VILLAGER_ATTACK_IMAGE_SPRITE", "line_number": 350, "usage_type": "attribute"}, {"api_name": "constant_image.ConstantImage", "line_number": 350, "usage_type": "name"}, {"api_name": "constant.Constant.VILLAGER_ATTACK_IMAGE_SCALE", "line_number": 350, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 350, "usage_type": "name"}, {"api_name": "constant.Constant.ARMOUR", "line_number": 365, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 365, "usage_type": "name"}, {"api_name": "constant.Constant.ITEM_ARMOUR_DEFEND_POWER_ADD", "line_number": 366, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 366, "usage_type": "name"}, {"api_name": "constant.Constant.BUILD_HOUSE_COUNT_DOWN", "line_number": 377, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 377, "usage_type": "name"}, {"api_name": "constant.Constant.BUILD_HOUSE_MESSAGE", "line_number": 378, "usage_type": "attribute"}, {"api_name": 
"constant.Constant", "line_number": 378, "usage_type": "name"}, {"api_name": "constant.Constant.BUILD_HOUSE_COUNT_DOWN", "line_number": 384, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 384, "usage_type": "name"}, {"api_name": "constant.Constant.BLACK", "line_number": 404, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 404, "usage_type": "name"}, {"api_name": "role.Role.FOLLOWER", "line_number": 407, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 407, "usage_type": "name"}, {"api_name": "role.Role.LEADER", "line_number": 408, "usage_type": "attribute"}, {"api_name": "role.Role", "line_number": 408, "usage_type": "name"}, {"api_name": "constant.Constant.BLACK", "line_number": 410, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 410, "usage_type": "name"}, {"api_name": "role.get_width", "line_number": 411, "usage_type": "call"}, {"api_name": "role.get_height", "line_number": 411, "usage_type": "call"}, {"api_name": "constant.Constant.BLACK", "line_number": 414, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 414, "usage_type": "name"}, {"api_name": "pygame.draw.rect", "line_number": 418, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 418, "usage_type": "attribute"}, {"api_name": "constant.Constant.GRAY", "line_number": 418, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 418, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 418, "usage_type": "call"}, {"api_name": "constant.Constant.HEAL_BAR_HEIGHT", "line_number": 420, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 420, "usage_type": "name"}, {"api_name": "pygame.draw.rect", "line_number": 421, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 421, "usage_type": "attribute"}, {"api_name": "constant.Constant.RED", "line_number": 421, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 421, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 421, "usage_type": "call"}, {"api_name": "constant.Constant.HEAL_BAR_HEIGHT", "line_number": 424, "usage_type": "attribute"}, {"api_name": "constant.Constant", "line_number": 424, "usage_type": "name"}]} +{"seq_id": "14997195375", "text": "import cocotb\nfrom cocotb.triggers import Timer\nfrom cocotb.result import TestFailure\nimport numpy as np\n\nfrom standard_deviation_filter import standard_deviation_filter\n\nTICK = 1\n\n@cocotb.test()\ndef standard_deviation_basic_test(dut):\n \"\"\"windowed standard deviation test for a step transition\"\"\"\n\n window_size = 128\n max_value = 2**14-1\n\n ## model\n data = np.append(np.zeros(window_size*2), np.ones(window_size*10)*(2**14-1))\n result = standard_deviation_filter(data, window_size) \n\n # non_zero = [hex(x) for x in result if x != 0]\n # print(non_zero)\n\n ## simulation\n # reset the dut\n dut.reset = 0\n dut.data_in = 0\n dut.clk = 0\n yield Timer(TICK)\n dut.clk = 1\n yield Timer(TICK)\n dut.reset = 1\n\n for i in range(window_size*2 + window_size*8): \n dut.data_in = int(data[i])\n dut.clk = 0\n yield Timer(TICK)\n dut.clk = 1\n yield Timer(TICK)\n\n # clock in 'window_size' integers, plus the 28 cycle delay for variance and 14 cycle delay for sqrt\n if i >= (window_size + 28 + 6):\n desired_result = result[i-(window_size+28+6)]\n if int(dut.data_out) != desired_result:\n raise TestFailure(\"standard deviation output was wrong; got %i, expected %i\" % 
(int(dut.data_out), desired_result))\n\n\n \n", "repo_name": "jeremyherbert/real_time_stdev", "sub_path": "standard_deviation/tests/test_standard_deviation.py", "file_name": "test_standard_deviation.py", "file_ext": "py", "file_size_in_byte": 1326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.append", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "standard_deviation_filter.standard_deviation_filter", "line_number": 19, "usage_type": "call"}, {"api_name": "cocotb.triggers.Timer", "line_number": 29, "usage_type": "call"}, {"api_name": "cocotb.triggers.Timer", "line_number": 31, "usage_type": "call"}, {"api_name": "cocotb.triggers.Timer", "line_number": 37, "usage_type": "call"}, {"api_name": "cocotb.triggers.Timer", "line_number": 39, "usage_type": "call"}, {"api_name": "cocotb.result.TestFailure", "line_number": 45, "usage_type": "call"}, {"api_name": "cocotb.test", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "3213402959", "text": "from django.db import models\nfrom django.contrib.postgres.fields import ArrayField, JSONField\nfrom django.utils.translation import ugettext_lazy as _\n# Create your models here.\n\n\nclass District(models.Model):\n name = models.CharField(max_length=20, verbose_name=\"지역구 이름\")\n count = models.PositiveIntegerField(verbose_name='정적수', default=1)\n state_name = models.CharField(max_length=20, verbose_name=\"지역구 행정구역 명\")\n active_21 = models.BooleanField(_(\"Is 21 district\"), default=False)\n active_20 = models.BooleanField(_(\"Is 20 district\"), default=False)\n active_19 = models.BooleanField(_(\"Is 19 district\"), default=False)\n\n class Meta:\n verbose_name = verbose_name_plural = '지역구'\n\n def __str__(self):\n return f\"[{self.name}]\"\n\nclass City(models.Model):\n province = models.CharField(max_length=20, verbose_name=\"시군구이름\", blank=True)\n town = ArrayField(base_field=models.CharField(verbose_name='읍면동명', max_length=255), default=list)\n state = models.CharField(max_length=20, verbose_name=\"지역 이름\")\n district = models.ForeignKey(verbose_name='지역구', \n to=District, \n on_delete=models.SET_NULL, \n null=True, \n blank=True, \n default=None\n ) # 비례인 경우 없음", "repo_name": "0011team/vote21-api", "sub_path": "src/district/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 11, "usage_type": 
"call"}, {"api_name": "django.db.models.BooleanField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.ArrayField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "20477529688", "text": "import random\n\nimport pytest\n\nfrom rtp_packet import RtpPacket\n\nPAYLOAD_MAX_SIZE = 200\n\n\ndef test_encoding_byte_0():\n result: bytearray = RtpPacket.encode(0, 0, 0, 0, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0\n\n result: bytearray = RtpPacket.encode(0, 1, 0, 0, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0b00100000\n result: bytearray = RtpPacket.encode(2, 0, 0, 0, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0b10000000\n\n result: bytearray = RtpPacket.encode(0, 1, 0, 0, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0b00100000\n\n result: bytearray = RtpPacket.encode(0, 0, 1, 0, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0b00010000\n\n result: bytearray = RtpPacket.encode(0, 0, 0, 15, 0, 0, 0, 0, bytearray(5))\n assert result[0] == 0b00001111\n\n\ndef test_encoding_byte_1():\n result: bytearray = RtpPacket.encode(0, 0, 0, 0, 0, 0, 0, 0, bytearray(5))\n assert result[1] == 0\n\n result: bytearray = RtpPacket.encode(0, 0, 0, 0, 1, 0, 0, 0, bytearray(5))\n assert result[1] == 0b10000000\n\n result: bytearray = RtpPacket.encode(0, 0, 0, 0, 0, 26, 0, 0, bytearray(5))\n assert result[1] == 0b00011010\n\n\ndef test_encoding_byte_2_3():\n result: bytearray = RtpPacket.encode(2, 1, 1, 1, 1, 26, 0, 0, bytearray(5))\n assert result[2] == 0\n assert result[3] == 0\n\n result: bytearray = RtpPacket.encode(2, 1, 1, 1, 1, 26, 50000, 0, bytearray(5))\n assert result[2] == 0b11000011\n assert result[3] == 0b01010000\n\n result: bytearray = RtpPacket.encode(2, 1, 1, 1, 1, 26, 65535, 0, bytearray(5))\n assert result[2] == 0b11111111\n assert result[3] == 0b11111111\n\n with pytest.raises(OverflowError):\n result: bytearray = RtpPacket.encode(2, 1, 1, 1, 1, 26, 65536, 0, bytearray(5))\n\n\ndef test_encoding_byte_4_7():\n import time\n\n timestamp = int(time.time())\n result: bytearray = RtpPacket.encode(2, 1, 1, 1, 1, 26, 65535, 0, bytearray(5))\n\n assert 
int(result[4:8].hex(), 16) == timestamp\n\n\ndef test_encoding_byte_8_11():\n for i in range(10):\n random_number: int = random.randint(0, 2 ** 32 - 1)\n result: bytearray = RtpPacket.encode(2, 0, 0, 0, 0, 26, 5, random_number, bytearray(5))\n\n assert int(result[8:12].hex(), 16) == random_number\n\n\ndef test_encoding_payload():\n payload: bytearray = bytearray(random.randbytes(PAYLOAD_MAX_SIZE))\n result: bytearray = RtpPacket.encode(2, 0, 0, 0, 0, 26, 5, 4, payload)\n\n assert result[12:] == payload\n", "repo_name": "ducthuy-ng/computer-networking1", "sub_path": "tests/test_rtp_packet.py", "file_name": "test_rtp_packet.py", "file_ext": "py", "file_size_in_byte": 2469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rtp_packet.RtpPacket.encode", "line_number": 11, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 11, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 14, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 14, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 16, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 16, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 19, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 19, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 22, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 22, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 25, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 25, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 30, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 30, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 33, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 33, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 36, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 36, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 41, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 41, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 45, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 45, "usage_type": "name"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 49, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 49, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 53, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 54, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 54, "usage_type": "name"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 61, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 61, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 69, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 69, "usage_type": "name"}, {"api_name": "random.randbytes", "line_number": 75, "usage_type": 
"call"}, {"api_name": "rtp_packet.RtpPacket.encode", "line_number": 76, "usage_type": "call"}, {"api_name": "rtp_packet.RtpPacket", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "40322835282", "text": "import os\nimport sys\n\nimport HierAMuS\nimport gmsh\n\ndef run(quadMesh,order,numrefs):\n\n gmsh.initialize()\n gmsh.model.add('test')\n\n\n L=10\n h=1\n\n E=100\n nu=0.0\n G=E/2/(1+nu)\n\n\n disporder=order\n\n p1=gmsh.model.occ.addPoint(0,0,0,2)\n p2=gmsh.model.occ.addPoint(L,0,0,2)\n p3=gmsh.model.occ.addPoint(L,h,0,2)\n p4=gmsh.model.occ.addPoint(0,h,0,2)\n\n l1=gmsh.model.occ.addLine(p1,p2)\n l2=gmsh.model.occ.addLine(p2,p3)\n l3=gmsh.model.occ.addLine(p3,p4)\n l4=gmsh.model.occ.addLine(p4,p1)\n\n cl=gmsh.model.occ.addCurveLoop([l1,l2,l3,l4])\n face=gmsh.model.occ.addPlaneSurface([cl])\n\n gmsh.model.occ.synchronize()\n gmsh.option.setNumber(\"Mesh.Algorithm\",4)\n\n if quadMesh:\n gmsh.option().setNumber(\"Mesh.RecombineAll\", 1)\n gmsh.option.setNumber(\"Mesh.RecombinationAlgorithm\", 2)\n\n gmsh.model.mesh.generate(2)\n for i in range(numrefs):\n gmsh.model.mesh.refine()\n\n gmsh.fltk.run()\n\n pathname = os.path.dirname(sys.argv[0])\n currPath = os.path.abspath(pathname)\n fesys= HierAMuS.FEMPy(pathname, \"firstvolume\")\n fesys.setStaticSolutionState()\n fesys.setSolver(3)\n fesys.getMacroCommands().setLogLevel(fesys.NoLog(), fesys.NoLog())\n\n gm = fesys.getMeshCommands().getFromGMESH()\n gm.addGeomFromGmsh(gmsh)\n\n fesys.getMeshCommands().getGeometryCommands().checkGeometry()\n\n gm.addFaceElements(gmsh,face,1)\n\n # if quadMesh:\n # gm.addQuadrilateralFiniteElements(gmsh,1,face,1)\n # else:\n # gm.addTriangleFiniteElements(gmsh,1,face,1)\n\n\n fesys.getMeshCommands().getElementFormulations().addEL201_2DShell(1,meshiddisp=1,disporder=disporder,mode=1)\n #fesys.getMeshCommands().getElementFormulations().addEL205_HDivTest(1,100,disporder=disporder,stressorder=disporder-1,mode=2,meshiddisp=1,meshidstress=2,E=E,nu=nu)\n fesys.getMeshCommands().getMaterialFormulations().addMA3_2D_LinearElastic_Isotrop(1,E=E,nu=nu,thickness=1,plainstrain=0)\n fesys.getMeshCommands().addMaterial(1,1,1)\n\n fesys.getMeshCommands().setDegreesOfFreedom()\n\n edges = gm.getEdgeNumbers(gmsh,l4,1)\n fesys.getMeshCommands().getBoundaryConditions().BC(fesys.getMeshCommands().getGeometryCommands().edgeType(),edges,meshId=1,dofs=[1,1,1],shapeOrder=disporder)\n #fesys.getMeshCommands().getBoundaryConditions().BC(fesys.getMeshCommands().getGeometryCommands().vertexType(),p1,meshId=1,dofs=[1,1,1],shapeOrder=disporder)\n #fesys.getMeshCommands().getBoundaryConditions().BC(fesys.getMeshCommands().getGeometryCommands().vertexType(),p4,meshId=1,dofs=[1,0,1],shapeOrder=disporder)\n \n edges = gm.getEdgeNumbers(gmsh,l2,1)\n fesys.getMeshCommands().getBoundaryConditions().load(fesys.getMeshCommands().getGeometryCommands().edgeType(),edges,meshId=1,load=[0,1,0],propnum=0,shapeorder=disporder)\n\n fesys.getMacroCommands().sparseSetUp()\n\n fesys.getMacroCommands().setPropFunction(0)\n fesys.getMacroCommands().setDt(1)\n fesys.getMacroCommands().timeincr()\n\n fesys.getMacroCommands().assembleSolve()\n\n fesys.getPlotCommands().toFile()\n\n\n # [nt,ncoor,npcoor]=gmsh.model.mesh.getNodes(1,l2,includeBoundary=True)\n\n # t=0\n # for i in nt:\n # sol=fesys.getMacroCommands().getSolution(fesys.getMeshCommands().getGeometryCommands().vertexType(),i,meshId=1)\n # print(sol)\n # t+=sol[1]\n\n # print(t/len(nt))\n\n # print(\"Result should be: \",4*L*L*L/E/h/h/h+L/G*6/5)\n \n sol = 
fesys.getMacroCommands().getSolution(fesys.getMeshCommands().getGeometryCommands().vertexType(),p2,1)\n fesys.getMacroCommands().setLogLevel(fesys.FullLog(), fesys.BasicLog())\n fesys.getMacroCommands().printInfo()\n fesys.getMacroCommands().computeEigenValues(10,30,max=False)\n return sol\n\n\nprint(run(False,2,1))", "repo_name": "sklarmann/HierAMuS", "sub_path": "Tests/python/twoD/triangle/gmshtriangle.py", "file_name": "gmshtriangle.py", "file_ext": "py", "file_size_in_byte": 3748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "gmsh.initialize", "line_number": 9, "usage_type": "call"}, {"api_name": "gmsh.model.add", "line_number": 10, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 23, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 23, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 24, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 25, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 26, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 28, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 29, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 30, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 31, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 31, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addCurveLoop", "line_number": 33, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 33, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPlaneSurface", "line_number": 34, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.synchronize", "line_number": 36, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "gmsh.option.setNumber", "line_number": 37, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gmsh.option", "line_number": 40, "usage_type": "call"}, {"api_name": "gmsh.option.setNumber", "line_number": 41, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 41, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.generate", "line_number": 43, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.refine", "line_number": 45, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gmsh.fltk.run", "line_number": 47, "usage_type": "call"}, {"api_name": "gmsh.fltk", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": 
"sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "HierAMuS.FEMPy", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "3200359212", "text": "\"\"\"\nThe file defines some tool functions which will be used in the blueprints.\n\"\"\"\nfrom flask import session\nfrom models import Class, Task\nimport datetime\nfrom threading import Timer\nfrom exts import db\nimport time\n\n\n# get all modules of the current user\ndef get_modules():\n uid = session.get('uid')\n modules = Class.query.filter_by(uid=uid).all()\n res = [] # a list of dictionaries\n for module in modules:\n res.append({'id': module.cid, 'name': module.cname, 'color': module.color})\n\n return res\n\n\n# get all uncompleted tasks of the current user\ndef get_uncompleted_tasks():\n modules = get_modules()\n res = [] # a list of dictionaries\n for module in modules:\n cid = module['id']\n tasks = Task.query.filter_by(cid=cid, task_status=False).all()\n for task in tasks:\n if task.task_date < datetime.date.today(): # if the day of deadline is passed\n task_color = 'red'\n elif task.task_date > datetime.date.today(): # if the day of deadline is not passed\n task_color = 'black'\n else:\n if task.task_time < datetime.datetime.now().time(): # if the deadline is passed\n task_color = 'red'\n else: # if the deadline is not passed\n task_color = 'black'\n res.append({\"id\": task.tid, \"task_name\": task.task_name, \"task_description\": task.task_description, \"date\": task.task_date, \"time\": task.task_time, \"informed\": task.informed, \"cid\": cid, \"module_name\": module['name'], \"module_color\": module['color'], \"task_color\": task_color})\n\n return res\n\n\n# get all completed tasks of the current user\ndef get_completed_tasks():\n modules = get_modules()\n res = [] # a list of dictionaries\n for module in modules:\n cid = module['id']\n tasks = Task.query.filter_by(cid=cid, task_status=True).all() # get all completed tasks\n for task in tasks:\n res.append({'id': task.tid, 'task_name': task.task_name, 'task_description': task.task_description, 'date': task.task_date, 'time': task.task_time, 'informed': task.informed, 'cid': cid, 'module_name': module['name'], 'module_color': module['color'], 'completed_date': task.completed_date, 'completed_time': task.completed_time})\n\n return res\n\n\n# get the number of uncompleted tasks of the current user\ndef get_number_of_uncompleted_tasks():\n tasks = get_uncompleted_tasks()\n return len(tasks)\n\n\n# get the number of completed tasks of the current user\ndef get_number_of_completed_tasks():\n tasks = get_completed_tasks()\n return len(tasks)\n\n\n# get the number of tasks of the current user\ndef get_unCompletedTasks_by_moduleID(i):\n tasks = get_uncompleted_tasks()\n res = []\n for task in tasks:\n if str(task.get('cid')) == str(i):\n res.append(task)\n print(res)\n return res\n\n\n# get the number of tasks of the current user\ndef get_sorted_tasks(func, sort_by):\n tasks = func()\n if sort_by == 'created_time_desc':\n # sort by module id\n res = sorted(tasks, key=lambda x: x['id'], reverse=True)\n return res\n elif sort_by == 'created_time_asc':\n # sort by module id\n res = sorted(tasks, key=lambda x: x['id'])\n return res\n elif sort_by == 'deadline_desc':\n # sort by deadline\n res = sorted(tasks, key=lambda x: datetime.datetime.combine(x['date'], x['time']), reverse=True)\n return res\n elif sort_by == 
'deadline_asc':\n # sort by deadline\n res = sorted(tasks, key=lambda x: datetime.datetime.combine(x['date'], x['time']))\n return res\n elif sort_by == 'completed_time_desc':\n # sort by completed time\n res = sorted(tasks, key=lambda x: datetime.datetime.combine(x['completed_date'], x['completed_time']), reverse=True)\n return res\n elif sort_by == 'completed_time_asc':\n # sort by completed time\n res = sorted(tasks, key=lambda x: datetime.datetime.combine(x['completed_date'], x['completed_time']))\n return res\n\n", "repo_name": "Foreverythin/TodoList", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.session.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Class.query.filter_by", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Class.query", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Class", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Task.query.filter_by", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Task.query", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Task.query.filter_by", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Task.query", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 102, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 102, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "20424614760", "text": "\"\"\"Command line option.\"\"\"\nfrom __future__ import absolute_import\n\nfrom argparse import ArgumentParser\n\nfrom .uom import base_unit, convert\n\n\ndef cmd_convert(arg=None):\n \"\"\"Convert value.\"\"\"\n parser = ArgumentParser(prog=\"uom_convert_value\")\n\n parser.add_argument(\"value\", type=float, nargs=\"+\", help=\"value to be converted\")\n\n parser.add_argument(\"-s\", dest=\"source\", help=\"unit source\")\n\n parser.add_argument(\"-t\", dest=\"target\", help=\"unit target\")\n\n parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose\")\n\n if arg is not None:\n args = parser.parse_args(arg.split())\n else:\n args = parser.parse_args()\n\n print(args)\n\n if len(args.value) == 1:\n out = 
convert(args.value[0], args.source, args.target, args.verbose)\n else:\n out = convert(args.value, args.source, args.target, args.verbose)\n\n if args.verbose:\n print(f\"Output: {out}\")\n\n return out\n\n\ndef cmd_base_unit(arg=None):\n \"\"\"Base unit.\"\"\"\n parser = ArgumentParser(prog=\"uom_base_unit\")\n\n parser.add_argument(\"unit\", help=\"input unit\")\n\n parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose\")\n\n if arg is not None:\n args = parser.parse_args(arg.split())\n else:\n args = parser.parse_args()\n\n print(args)\n\n out = base_unit(args.unit, args.verbose)\n\n if args.verbose:\n print(f\"Output: {out}\")\n\n return out\n", "repo_name": "Schlumberger/UOM", "sub_path": "uom/cmd_line.py", "file_name": "cmd_line.py", "file_ext": "py", "file_size_in_byte": 1432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "uom.convert", "line_number": 29, "usage_type": "call"}, {"api_name": "uom.convert", "line_number": 31, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 41, "usage_type": "call"}, {"api_name": "uom.base_unit", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "4497724632", "text": "\nfrom django.contrib import admin\nfrom gamestats import views\nfrom django.urls import path, include\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('bball/', views.display),\n path('bball/team/', views.team_info),\n path('bball/player/', views.player_info),\n path('bball/about/', views.aboutpage),\n path('bball/games/', views.games),\n path('', TemplateView.as_view(template_name=\"index.html\")),\n path('accounts/', include('allauth.urls')),\n #path('/accounts/google/login/', ) \n path('logout/', LogoutView.as_view()),\n #path('bball/compare/', views.compare),\n path('bball/compare//', views.compare, name = 'comparison'),\n]\n", "repo_name": "jtjohnnyx/Basketball-Predictions---Teams-Players", "sub_path": "Prototypes/v4/bball/bball/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 768, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "gamestats.views.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "gamestats.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "gamestats.views.team_info", "line_number": 11, "usage_type": "attribute"}, {"api_name": "gamestats.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "gamestats.views.player_info", "line_number": 12, "usage_type": "attribute"}, {"api_name": "gamestats.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "gamestats.views.aboutpage", "line_number": 13, "usage_type": "attribute"}, {"api_name": "gamestats.views", 
"line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "gamestats.views.games", "line_number": 14, "usage_type": "attribute"}, {"api_name": "gamestats.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "gamestats.views.compare", "line_number": 20, "usage_type": "attribute"}, {"api_name": "gamestats.views", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "11216195999", "text": "# Importing boto3 library to make functionality available\nimport boto3\n\n\nsession = boto3.Session(profile_name='iamadmin-general')\n\n# Creating a client connection with AWS S3\ns3 = session.client('s3')\n# Creating a bucket\ns3.create_bucket(Bucket='testbucket20220210second') #no characters in the bucket name e.g. _ or - \nprint(\"Bucket created succesfully\")\n\n\n\n#session methods:\n# https://stackoverflow.com/questions/33378422/how-to-choose-an-aws-profile-when-using-boto3-to-connect-to-cloudfront", "repo_name": "alexprojects1/portfolio", "sub_path": "Python_AWS_Boto3/boto3_S3_bucket_create.py", "file_name": "boto3_S3_bucket_create.py", "file_ext": "py", "file_size_in_byte": 496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "boto3.Session", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "7315706825", "text": "##Note\n##euclidian distance\n##sqrt((x-x1)**2+(y-y1)**2..................)\nimport numpy as np\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom collections import Counter\nimport warnings\nimport pandas as pd\nimport random\n\nstyle.use('fivethirtyeight')\n\ndef KNN(data,predict,k=3):\n if len(data)>=k:\n warnings.warn('K value is too small brah !!')\n distance=[]\n for group in data:\n for feat in data[group]:\n euclid_dist=np.linalg.norm(np.array(feat)-np.array(predict))\n #euclid_dist=sqrt(sum([(feat[i]-predict[i])**2 for i in range(len(feat))]))\n distance.append([euclid_dist,group])\n votes = [i[1] for i in sorted(distance)[:k]]\n vote_result=Counter(votes).most_common(1)[0][0]\n confidence= Counter(votes).most_common(1)[0][1]/k\n return vote_result,confidence\n\n##dataset={'k':[[1,2],[2,3],[3,1]],'r':[[6,5],[7,7],[8,6]]}\n##new_features=[5,7]\n##result= KNN(dataset,new_features,k=3)\n##print(result)\n##\n##[[plt.scatter(ii[0],ii[1],s=100,color=i) for ii in dataset[i]]for i in dataset]\n##plt.scatter(new_features[0],new_features[1],s=100,color=result)\n##plt.show()\n\ndf=pd.read_csv(\"breast-cancer-wisconsin.data\")\ndf.replace('?',-99999,inplace=True)\ndf.drop(['id'],1,inplace=True)\nfull_data=df.astype(float).values.tolist() #we need to make sure everything is float some values are 
strings\n#print(full_data[:10])\n#print(100*'_')\nrandom.shuffle(full_data)\n#print(full_data[:10])\n\ntest_size=0.2\ntrain_set={2:[],4:[]}\ntest_set={2:[],4:[]}\ntrain_data = full_data[:-int(test_size*len(full_data))]\ntest_data=full_data[-int(test_size*len(full_data)):]\n\nfor i in train_data:\n train_set[i[-1]].append(i[:-1])\nfor i in test_data:\n test_set[i[-1]].append(i[:-1])\n\ncorrect=0\ntotal=0\n\nfor group in test_set:\n for data in test_set[group]:\n vote,confidence=KNN(train_set,data,k=5)\n if group==vote:\n correct+=1\n else:\n print(confidence)\n total+=1\nprint('Accuracy : ',correct/total)\n\n\n\n#each datapoint is independent so you can thread KNN heavily so thats why their KNN is Faster\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "keshavbharadwaj/ML_Basics", "sub_path": "git-files/ML_Basics/KNN/KNNmanual.py", "file_name": "KNNmanual.py", "file_ext": "py", "file_size_in_byte": 2139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.style.use", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 13, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "38079318723", "text": "import os\nfrom datetime import date\nfrom discord.ext import commands\n\nimport discord\nfrom dotenv import load_dotenv\n\ndays = [31, 28, 31, 30, 31, 30,\n 31, 31, 30, 31, 30, 31]\n\nthemes = [\n \"Happiness\",\n \"Beauty\",\n \"Sleep\",\n \"Garden\",\n \"Imagination\",\n \"Secret\",\n \"Give\",\n \"Thankful\",\n \"Kindness\",\n \"Inside\",\n \"Outside\",\n \"Jump\",\n \"Friendship\",\n \"Love\",\n \"Fire\",\n \"Light\",\n \"Darkness\",\n \"Dream\",\n \"Believe\",\n \"Hope\",\n \"Faith\",\n \"Focus\",\n \"Clean\",\n \"Angry\",\n \"Work\",\n \"Music\",\n \"Stars\",\n \"Mystery\",\n \"Envelope\",\n \"Book\",\n \"Dare\",\n \"Choose\",\n \"Beach\",\n \"Forgiveness\",\n \"Disaster\",\n \"Dance\",\n \"Mistake\",\n \"New\",\n \"Yes\",\n \"Path\",\n \"Horizon\",\n \"Mountain\",\n \"River\",\n \"Open\",\n \"Heart\",\n \"Soul\",\n \"Tear\",\n \"Spiral\",\n \"Up\",\n \"Go\",\n \"Be\",\n \"Seek\",\n \"Learn\",\n \"Balance\",\n \"Grow\",\n \"Strength\",\n \"Seek\",\n \"Passion\",\n \"Move\",\n \"Today\",\n \"Awaken\",\n \"Angels\",\n \"Laughter\",\n \"Listening\",\n \"Pause\",\n \"Peace\",\n \"Relax\",\n \"Space\",\n \"Wisdom\",\n \"Difference\",\n \"Begin\",\n \"Time\",\n \"Brave\",\n \"Breathe\",\n \"Childhood\",\n \"Discovery\",\n \"Mind\",\n \"Energy\",\n \"Flower\",\n \"Health\",\n \"Harmony\",\n \"Recovery\",\n \"Stand\",\n \"Nowhere\",\n \"Soar\",\n \"Skip\",\n \"Fresh\",\n \"Delicious\",\n \"Power\",\n \"Complete\",\n \"Grace\",\n \"Legend\",\n \"Ecstasy\",\n \"Hug\",\n \"Proud\",\n \"Simple\",\n \"Machine\",\n \"Victory\",\n \"Vibrancy\",\n \"Leadership\",\n \"Alive\",\n \"Bloom\",\n \"Fame\",\n \"Enormous\",\n \"Independent\",\n \"Unique\",\n \"Metamorphosis\",\n \"Silence\",\n \"Tranquility\",\n 
\"Project\",\n \"Smile\",\n \"Miracle\",\n \"Amazing\",\n \"Game\",\n \"Connected\",\n \"Muse\",\n \"Dazzle\",\n \"Determination\",\n \"Free\",\n \"Help\",\n \"Intuition\",\n \"Smart\",\n \"Sweet\",\n \"Honesty\",\n \"Useful\",\n \"Safety\",\n \"Ready\",\n \"Vision\",\n \"Cycle\",\n \"Discipline\",\n \"Promise\",\n \"Patience\",\n \"Destiny\",\n \"Rejuvenate\",\n \"Shine\",\n \"Talk\",\n \"Stress\",\n \"Missing\",\n \"Age\",\n \"Success\",\n \"Motivation\",\n \"Family\",\n \"Loneliness\",\n \"Experience\",\n \"Future\",\n \"Vintage\",\n \"Wings\",\n \"Letter\",\n \"Wandering\",\n \"Factory\",\n \"Rainbow\",\n \"Ask\",\n \"Self\",\n \"Locket\",\n \"Home\",\n \"Cloud\",\n \"Hunger\",\n \"Possibilities\",\n \"Boredom\",\n \"Innocence\",\n \"Charge\",\n \"Rhythm\",\n \"Date\",\n \"Consequence\",\n \"Prescription\",\n \"Goals\",\n \"Lost\",\n \"Found\",\n \"Cry\",\n \"Parade\",\n \"Party\",\n \"Ice\",\n \"Dragon\",\n \"Alarm\",\n \"Understanding\",\n \"Uncomfortable\",\n \"Serious\",\n \"Opposites\",\n \"Playful\",\n \"Attention\",\n \"Caring\",\n \"Hero\",\n \"Lace\",\n \"Clue\",\n \"Ideas\",\n \"Junk\",\n \"Collection\",\n \"Radio\",\n \"Interview\",\n \"Zoom\",\n \"Puzzle\",\n \"Landscape\",\n \"Advice\",\n \"Memories\",\n \"Communication\",\n \"Compassion\",\n \"Universe\",\n \"Loyalty\",\n \"Sharing\",\n \"Travel\",\n \"Voices\",\n \"Sidewalk\",\n \"Tree\",\n \"Story\",\n \"Why\",\n \"Maybe\",\n \"Antique\",\n \"Action\",\n \"Culture\",\n \"Fuel\",\n \"Desire\",\n \"Weather\",\n \"Directions\",\n \"List\",\n \"Ride\",\n \"Wish\",\n \"Breakfast\",\n \"Touch\",\n \"Sadness\",\n \"Comfort\",\n \"Circle\",\n \"Reflection\",\n \"Optimism\",\n \"Anxiety\",\n \"Shadow\",\n \"Spirit\",\n \"Stretch\",\n \"Challenge\",\n \"Five\",\n \"Messy\",\n \"Adventure\",\n \"Fun\",\n \"Surprise\",\n \"Illusion\",\n \"Mischief\",\n \"Shy\",\n \"Lesson\",\n \"Whisper\",\n \"Fight\",\n \"Castle\",\n \"Protect\",\n \"Shop\",\n \"Wheel\",\n \"Pieces\",\n \"Leaves\",\n \"Fool\",\n \"Productivity\",\n \"Overcome\",\n \"Encouragement\",\n \"Watch\",\n \"Lighthouse\",\n \"Mild\",\n \"Mouth\",\n \"Rare\",\n \"Target\",\n \"Boat\",\n \"Box\",\n \"Grief\",\n \"Heavy\",\n \"Zeal\",\n \"Color\",\n \"Brilliance\",\n \"Enthusiastic\",\n \"Approval\",\n \"Enchanting\",\n \"Attraction\",\n \"Photograph\",\n \"Waterfall\",\n \"Moment\",\n \"Paradise\",\n \"Parachute\",\n \"Social\",\n \"Zingy\",\n \"Number\",\n \"Little\",\n \"Sound\",\n \"Reach\",\n \"Money\",\n \"Staircase\",\n \"Lemon\",\n \"Wood\",\n \"Try\",\n \"You\",\n \"Quote\",\n \"Weakness\",\n \"Bridge\",\n \"Building\",\n \"Clothes\",\n \"Prepare\",\n \"Teach\",\n \"Window\",\n \"Drive\",\n \"Middle\",\n \"Microphone\",\n \"Definition\",\n \"Shout\",\n \"Catch\",\n \"Release\",\n \"Balloons\",\n \"Sharp\",\n \"Crowd\",\n \"Cute\",\n \"Cheery\",\n \"Glow\",\n \"Apple\",\n \"Pictures\",\n \"Swing\",\n \"Wave\",\n \"Joke\",\n \"Cope\",\n \"Accomplish\",\n \"Special\",\n \"Neighborhood\",\n \"Wonderful\",\n \"Service\",\n \"Calling\",\n \"Magic\",\n \"Spontaneous\",\n \"Fluffy\",\n \"Organic\",\n \"Silly\",\n \"Thunderstorm\",\n \"Toys\",\n \"Butterfly\",\n \"Buzz\",\n \"Vine\",\n \"Shock\",\n \"Enjoy\",\n \"Craving\",\n \"Respect\",\n \"Abstract\",\n \"Rainbow\",\n \"Train\",\n \"Invention\",\n \"Perspective\",\n \"Genius\",\n \"Circus\",\n \"Ball\",\n \"Crystal\",\n \"Fairy\",\n \"Cup\",\n \"Reality\",\n \"Forget\",\n \"Priorities\",\n \"Journey\",\n \"Hands\",\n \"Shoes\",\n \"Television\",\n \"Newspaper\",\n \"Radiance\",\n \"Hidden\",\n \"Fright\",\n \"Climb\",\n 
\"Guess\",\n \"Pretend\",\n \"Turn\",\n \"Shatter\",\n \"Together\",\n \"Adapt\",\n \"Keep\",\n \"Closed\",\n \"Embrace\",\n \"Blessing\",\n \"Block\",\n \"Pizzazz\",\n \"Breakthrough\",\n \"Curiosity\",\n \"Distance\",\n \"Zen\"]\n\nload_dotenv()\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix='!', intents=intents)\nTOKEN = os.getenv('DISCORD_TOKEN')\n\n\n@bot.command()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\n@bot.command()\nasync def today(ctx):\n index = get_day_of_year() - 1\n response = 'Today\\'s theme is ✨' + themes[index] + '✨'\n await ctx.send(response)\n\n\n@bot.command()\nasync def yesterday(ctx):\n index = get_day_of_year() - 2\n response = 'Yesterday\\'s theme was ✨' + themes[index] + '✨'\n await ctx.send(response)\n\n\n@bot.command()\nasync def tomorrow(ctx):\n index = get_day_of_year()\n response = 'Tomorrow\\'s theme will be ✨' + themes[index] + '✨'\n await ctx.send(response)\n\n\n@bot.command()\nasync def fuckyou(ctx):\n await ctx.send(\"NO FUCK YOU!\")\n\n\n@bot.command()\nasync def highfive(ctx):\n await ctx.send(\"🙏\")\n\n\n@bot.command()\nasync def loveyou(ctx):\n await ctx.send(\"❤️\")\n\n\n@bot.command()\nasync def blessyou(ctx):\n await ctx.send(\"God's not real, we're all alone out here\")\n\n\ndef get_day_of_year():\n today_date = date.today()\n year = today_date.year\n month = today_date.month\n day = today_date.day\n\n # If current year is a leap year and the date\n # given is after the 28th of February then\n # it must include the 29th February\n if (month > 2 and year % 4 == 0 and\n (year % 100 != 0 or year % 400 == 0)):\n day += 1\n\n # Add the days in the previous months\n month -= 1\n while month > 0:\n day = day + days[month - 1]\n month -= 1\n return day\n\n\nbot.run(TOKEN)\n", "repo_name": "DevanClark/InspirationBot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 7125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 382, "usage_type": "call"}, {"api_name": "discord.Intents.default", "line_number": 383, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 383, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 385, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 385, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 386, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 436, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 436, "usage_type": "name"}]} +{"seq_id": "29346677704", "text": "import matplotlib.pyplot as plt\n\ndef parse_elapsed_secs(s):\n\tsecs = 0.0\n\tsecs += float(s.split(\":\")[0]) * 60\n\tsecs += float(s.split(\":\")[1].split(\".\")[0])\n\tsecs += float(\"0.\" + s.split(\":\")[1].split(\".\")[1])\n\treturn secs\n\ndef get_avg_from_table_str(s):\n return float(s.split(\" (\")[0])\n\ndef bar_plot(ax, data, l, yerr=None, colors=None, total_width=0.8, single_width=1):\n # Check if colors where provided, otherwhise use the default color cycle\n if colors is None:\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n # Number of bars per group\n n_bars = len(data)\n\n # The width of a single bar\n bar_width = total_width / n_bars\n\n # List containing handles for the drawn bars, used for the legend\n bars = []\n\n # Iterate over all data\n for i, (name, values) in 
enumerate(data.items()):\n # The offset in x direction of that bar\n x_offset = (i - n_bars / 2) * bar_width + bar_width / 2\n\n # Draw a bar for every value of that type\n for x, y in enumerate(values):\n yerrr = None\n if yerr is not None:\n yerrr = yerr[name][x]\n bar = ax.bar(x + x_offset, y, yerr=yerrr, width=bar_width * single_width, color=colors[i % len(colors)])\n\n # Add a handle to the last drawn bar, which we'll need for the legend\n bars.append(bar[0])\n \n if l is not None:\n ax.legend(bars, l)\n\ndef set_size(width_pt, fraction=1, subplots=(1, 1)):\n \"\"\"Set figure dimensions to sit nicely in our document.\n\n Parameters\n ----------\n width_pt: float\n Document width in points\n fraction: float, optional\n Fraction of the width which you wish the figure to occupy\n subplots: array-like, optional\n The number of rows and columns of subplots.\n Returns\n -------\n fig_dim: tuple\n Dimensions of figure in inches\n \"\"\"\n # Width of figure (in pts)\n fig_width_pt = width_pt * fraction\n # Convert from pt to inches\n inches_per_pt = 1 / 72.27\n\n # Golden ratio to set aesthetic figure height\n golden_ratio = (5**.5 - 1) / 2\n\n # Figure width in inches\n fig_width_in = fig_width_pt * inches_per_pt\n # Figure height in inches\n fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])\n\n return (fig_width_in, fig_height_in)", "repo_name": "Peter-JanGootzen/bento_reproducibility_research", "sub_path": "bento_bench.py", "file_name": "bento_bench.py", "file_ext": "py", "file_size_in_byte": 2341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "12469889332", "text": "\"\"\"felicitas URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom game_rules.views import (\n get_next_poll, get_polls_per_game_type, get_poll_help, get_game_description,\n get_active_games, collect_game_polls, get_games_data)\n\n\nurlpatterns = [\n path('next-poll///', get_next_poll, name='next-poll'),\n path('all-polls//', get_polls_per_game_type, name='all-polls'),\n path('poll-help//', get_poll_help, name='poll-help'),\n path('game-info//', get_game_description, name='game-info'),\n path('games-list/', get_active_games, name='games-list'),\n path('cache-polls/', collect_game_polls, name='cache-polls'),\n path('games-data/', get_games_data, name='games-data'),\n]\n", "repo_name": "mileto94/felicitas", "sub_path": "felicitas/game_rules/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1355, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "game_rules.views.get_next_poll", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "game_rules.views.get_polls_per_game_type", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "game_rules.views.get_poll_help", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "game_rules.views.get_game_description", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "game_rules.views.get_active_games", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "game_rules.views.collect_game_polls", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "game_rules.views.get_games_data", "line_number": 29, "usage_type": "argument"}]} +{"seq_id": "18839322504", "text": "# Usage: python3 cluster.py > out.txt\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics.cluster import homogeneity_score\nfrom sklearn import metrics\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# Get medoid, nearest to medoid, and its \"normalized distance\" to the medoid\ndef medoid(cluster):\n\tn_ = len(cluster)\n\tdist_matrix = np.zeros((n_,n_))\n\tfor i in range(n_):\n\t\tfor j in range(i+1,n_):\n\t\t\tdist = np.linalg.norm(cluster[i]-cluster[j])\n\t\t\tdist_matrix[i,j] = dist\n\t\t\tdist_matrix[j,i] = dist\n\tmed = np.argmin(dist_matrix.sum(axis=0))\n\tnear = 1 if med == 0 else 0\n\tfor i in range(n_):\n\t\tif i != med:\n\t\t\tif dist_matrix[med,i] < dist_matrix[med,near]:\n\t\t\t\tnear = i\n\trel_dist = np.linalg.norm(cluster[near]-cluster[med])/np.mean(dist_matrix[med])\n\treturn cluster[med], cluster[near], rel_dist\n\n# Search for the range of K\ndef search_k(X,k_max=301):\n\tJs = []\n\tCalinks = []\n\tDavies = []\n\tfor k in range(2,k_max):\n\t\tkmeans = KMeans(n_clusters=k, random_state=0, n_jobs=-1).fit(X)\n\t\tlabels = kmeans.labels_\n\n\t\tprint(k)\n\t\tJs.append(kmeans.inertia_)\n\t\tcalinks_score = metrics.calinski_harabaz_score(X, 
labels)\n\t\tCalinks.append(calinks_score)\n\t\tprint('Calinks Score para %d clusters: %f'%(k,calinks_score))\n\t\tdavies_score = metrics.davies_bouldin_score(X, labels)\n\t\tDavies.append(davies_score)\n\t\tprint('Davies Bound Score para %d clusters: %f'%(k,davies_score))\n\n\t# Plot metrics as function of k\n\tplt.xlabel('Number of clusters')\n\tplt.ylabel('Inertia')\n\tplt.plot(range(2,k_max),Js)\n\tplt.show()\n\n\tplt.xlabel('Number of clusters')\n\tplt.ylabel('Calinks Score')\n\tplt.plot(range(2,k_max),Calinks)\n\tplt.show()\n\n\tplt.xlabel('Number of clusters')\n\tplt.ylabel('Davies Bound Score')\n\tplt.plot(range(2,k_max),Davies)\n\tplt.show()\n\ndef eval_elbow(X,elbow=[40,60,80]):\n\t# Evaluate some values of K close to the \"elbow\"\n\tJs = []\n\tCalinks = []\n\tDavies = []\n\tfor k in elbow:\n\t\tkmeans = KMeans(n_clusters=k, random_state=0, n_jobs=-1).fit(X)\n\t\tlabels = kmeans.labels_\n\t\tprint('%d clusters'%k)\n\n\t\t# Get random cluster\n\t\tcluster_id = np.random.choice(k)\n\t\tcluster = []\n\t\tfor i in range(X.shape[0]):\n\t\t\tif labels[i] == cluster_id:\n\t\t\t\tcluster.append(X[i])\n\n\t\t# Get medoid and nearest\n\t\tmed, near, rel_dist = medoid(cluster)\n\t\tprint('Medoid: {}'.format(med))\n\t\tprint('Nearest to medoid: {}'.format(near))\n\t\tprint('Relative distance from medoid to nearest: %f'%rel_dist)\n\n\t\t# Get scores\n\t\tprint('Inertia: %f'%kmeans.inertia_)\n\t\tdavies_score = metrics.davies_bouldin_score(X, labels)\n\t\tprint('Davies: %f'%davies_score)\n\t\tcoef_silhueta = silhouette_score(X,labels,metric='euclidean')\n\t\tprint('Silhueta: %f'%coef_silhueta)\n\ndef eval_chosen_pca(X,k=80):\n\tJs = []\n\tCalinks = []\n\tDavies = []\n\t# Evaluate with k groups by using the data from PCA and different variances\n\tfor v in [0.80, 0.85, 0.90, 0.95, 0.99]:\n\t\tprint('Variance = %f'%v)\n\t\tpca = PCA(v)\n\t\tpca.fit(X)\n\n\t\tprint('Number of components: %d'%len(pca.explained_variance_ratio_))\n\t\tprint('Explained variance: {}'.format(pca.explained_variance_ratio_))\n\n\t\tX_pca = pca.transform(X)\n\n\t\tkmeans = KMeans(n_clusters=k, random_state=0, n_jobs=-1).fit(X_pca)\n\t\tlabels = kmeans.labels_\n\n\t\t# Get random cluster\n\t\tprint()\n\t\tcluster_ids = np.random.choice(k,3,replace=False) # 3 clusters chosen randomly\n\t\tfor c in cluster_ids:\n\t\t\tprint('Cluster %d:'%c)\n\t\t\tcluster = []\n\t\t\tfor i in range(X.shape[0]):\n\t\t\t\tif labels[i] == c:\n\t\t\t\t\tcluster.append(X_pca[i])\n\t\t\t# Get medoid and nearest\n\t\t\tmed, near, rel_dist = medoid(cluster)\n\t\t\tprint('Medoid: {}'.format(med))\n\t\t\tprint('Nearest to medoid: {}'.format(near))\n\t\t\tprint('Relative distance from medoid to nearest: %f'%rel_dist)\n\t\tprint()\n\n\t\t# Get scores\n\t\tprint('Inertia: %f'%kmeans.inertia_)\n\t\tdavies_score = metrics.davies_bouldin_score(X_pca, labels)\n\t\tprint('Davies: %f'%davies_score)\n\t\tcoef_silhueta = silhouette_score(X_pca,labels,metric='euclidean')\n\t\tprint('Silhueta: %f'%coef_silhueta)\n\t\tprint()\n\ndef load_and_normalize_data(data_dir='word2vec.csv'):\n\t# Load data\n\tX = np.genfromtxt(data_dir,dtype=np.float32,delimiter=',',skip_header=0,encoding='ascii')\n\n\t# Normalize the data\n\tscaler = StandardScaler()\n\tscaler.fit(X)\n\treturn scaler.transform(X)\n\nX = load_and_normalize_data()\nsearch_k(X)\neval_elbow(X)\neval_chosen_pca(X)\n\n'''\nmed = input().split(' ')\nmed_f = []\nfor m in med:\n\tif len(m) > 1:\n\t\tmed_f.append(float(m))\n\nmed_f = np.array(med_f)\n\nfor i in range(X.shape[0]):\n\tif np.linalg.norm(X[i]-med_f) < 
0.001: # np.array_equal(x,med_f)\n\t\tprint('Line %d'%(i+2))\n\t\tbreak\n'''\n", "repo_name": "Ronnypetson/clustering2018", "sub_path": "clustering/health-dataset/cluster.py", "file_name": "cluster.py", "file_ext": "py", "file_size_in_byte": 4587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.metrics.calinski_harabaz_score", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 41, "usage_type": "name"}, {"api_name": "sklearn.metrics.davies_bouldin_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.davies_bouldin_score", "line_number": 89, "usage_type": "call"}, {"api_name": 
"sklearn.metrics", "line_number": 89, "usage_type": "name"}, {"api_name": "sklearn.metrics.silhouette_score", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.davies_bouldin_score", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 130, "usage_type": "name"}, {"api_name": "sklearn.metrics.silhouette_score", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "74715204355", "text": "from functools import partial\n\n\ndef update_schema_if_mandatory(response, schema, patch_collection):\n if 'details' in response:\n collection_schema = response['details']['existing']['schema']\n else:\n collection_schema = response['data'].get('schema')\n\n if schema and (not collection_schema or collection_schema != schema):\n patch_collection(data={'schema': schema})\n\n\ndef get_kinto_records(kinto_client, bucket, collection, permissions,\n schema=None):\n \"\"\"Return all the kinto records for this bucket/collection.\"\"\"\n # Create bucket if needed\n kinto_client.create_bucket(bucket, if_not_exists=True)\n response = kinto_client.create_collection(\n collection, bucket, permissions=permissions, if_not_exists=True)\n\n patch_collection = partial(kinto_client.patch_collection,\n bucket=bucket, collection=collection)\n\n update_schema_if_mandatory(response, schema, patch_collection)\n\n return kinto_client.get_records(bucket=bucket, collection=collection)\n", "repo_name": "mozilla-services/xml2kinto", "sub_path": "xml2kinto/kinto.py", "file_name": "kinto.py", "file_ext": "py", "file_size_in_byte": 1052, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "functools.partial", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "13527618917", "text": "import pymysql\nimport os, sys\n\ndef getData(path):\n data = []\n f = open(path, 'r')\n f.readline() # Заголовок\n line = f.readline().strip()\n while line:\n if line[0] != '#' and len(line) > 4:\n data.append(tuple(line.replace('\\n', '').replace('\\r', '').split(';')))\n line = f.readline()\n \n return data\n \nclass db(object):\n '''\n Работа с базой\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n self.host = '127.0.0.1'\n #port = '3306'\n self.user = 'root'\n self.passwd = '123'\n self.db = 'my_db' \n \n def connect(self):\n '''\n Подключение к БД\n '''\n self.conn = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd, db=self.db, charset='utf8')\n self.cur = self.conn.cursor()\n \n def close(self):\n self.conn.close()\n \n def update(self, fio, data, name_row):\n '''\n Обновление данных сотрудников\n '''\n fio = fio.replace(' ', ' ').strip().lower().split(' ')\n str_query = r\"SELECT j.`id` FROM j3m5_contact_details j WHERE j.`name` like '%\" + fio[0] + r\"%' and j.`name` like '%\" + fio[1] + r\"%'\"\n self.cur.execute(str_query)\n if self.cur.rowcount == 0:\n print('Not found: %s %s %s' % (fio[0],fio[1], fio[2]))\n elif 
self.cur.rowcount > 1:\n print('Many found: %s %s %s' % (fio[0],fio[1], fio[2]))\n else: \n id_row = self.cur.fetchone()\n str_query = r\"UPDATE j3m5_contact_details set %s='%s' WHERE id = %s\" % (name_row, data, id_row[0])\n self.cur.execute(str_query)\n self.conn.commit()\n print('+ %s, %s %s %s -> %s' % (id_row[0], fio[0],fio[1], fio[2], data))\n \n \ndef main():\n files_dir = os.path.dirname(sys.argv[0]) + '/'\n file_data = 'dr.csv';\n data = getData(files_dir + file_data)\n my_db = db()\n my_db.connect()\n for i in data:\n my_db.update(i[0], i[1], 'created' )\n my_db.close()\n\nif __name__ == '__main__':\n main()", "repo_name": "nerf-qh/contacts", "sub_path": "add_info.py", "file_name": "add_info.py", "file_ext": "py", "file_size_in_byte": 2163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymysql.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "32892880365", "text": "import json\nfrom datetime import datetime\n\n\nfile = open('romanToInt.json', 'r')\ntests = json.load(file)\n\ntry:\n def romanToInt(s):\n return len(s)\n \n start = datetime.now()\n error = False\n\n for test in tests:\n result = romanToInt(test['input'][0])\n if result != test['output']:\n error = True\n print('Input:', str(test['input'])[1:-1])\n print('Expected:', test['output'])\n print('Got:', result)\n break\n\n if error:\n print('Error')\n else:\n end = datetime.now()\n time = end - start\n print(time.seconds * 1000 + time.microseconds / 1000, 'ms')\n\nexcept Exception as e:\n print(type(e).__name__)\n print(e)\n print('Error')\n", "repo_name": "sanek2233/master-work", "sub_path": "tests/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "11258880921", "text": "import json, urllib, requests\n\nwith urllib.request.urlopen('https://gist.githubusercontent.com/tdreyno/4278655/raw/7b0762c09b519f40397e4c3e100b097d861f5588/airports.json') as url:\n\tdata = json.loads(url.read().decode())\nmyDict = {}\nfor lin in data:\n\tmyDict[lin['code']] = lin['name']\n\njsonDep = json.dumps(myDict)\nfile = open('AirportNames.txt','w') #creamos el archivo\nfile.writelines(jsonDep)#le metemos los datos\nfile.close() #cerramos el archivo", "repo_name": "IgTriguero/MadridDashboardScripts", "sub_path": "AerName.py", "file_name": "AerName.py", "file_ext": "py", "file_size_in_byte": 449, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib.request.urlopen", "line_number": 3, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 3, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 4, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}]} 
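The add_info.py record above builds its SELECT and UPDATE by interpolating user-derived strings into SQL, which breaks on quotes in names and is injection-prone. A hedged rewrite of that update step using pymysql's parameter binding; the table and column names come from the record, the search terms are placeholders, and identifiers such as the updated column still cannot be bound and must be whitelisted separately:

import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123',
                       db='my_db', charset='utf8')
cur = conn.cursor()
cur.execute('SELECT id FROM j3m5_contact_details '
            'WHERE name LIKE %s AND name LIKE %s',
            ('%ivanov%', '%ivan%'))          # hypothetical surname/name
if cur.rowcount == 1:
    (row_id,) = cur.fetchone()
    cur.execute('UPDATE j3m5_contact_details SET created=%s WHERE id=%s',
                ('1980-01-01', row_id))      # placeholder value
    conn.commit()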
+{"seq_id": "34483773398", "text": "#Se importa el programa HCRfinal (donde se hace la resolución del problema)\nimport HCRfinal\n#Se importa la librería pygame\nimport pygame\n\n#Se creo la función redrawGameWindow con los parámetros Dir, p1 y p2\ndef redrawGameWindow(Dir, p1, p2):\n global x, y, Side_A, Side_B #Se declaran variables globales x, y, Side_A y Side_B\n # \n win.blit(bg,(0,0))\n ypos = 300\n for item in Side_A:\n win.blit(item,(5,ypos))\n ypos = ypos - 60\n\n ypos = 300\n for item in Side_B:\n win.blit(item,(450,ypos))\n ypos = ypos - 60\n\n if p1 != 'Unknown':\n if right:\n win.blit(BoatRight,(x,y))\n win.blit(farmer,(x,y-50))\n if p2 != farmer:\n win.blit(p2,(x+50,y-50)) \n elif left:\n win.blit(BoatLeft,(x,y))\n win.blit(farmer,(x,y-50))\n if p2 != farmer:\n win.blit(p2,(x+50,y-50)) \n else:\n win.blit(char,(x, y))\n pygame.display.update()\n\n#Se creo la función get_characters, con los parámetros d, p1 y p2\ndef get_characters(d, p1, p2):\n #Se hace uso de una condicional\n if p2 == 'Zorro': #Si p2 es igual a zorro, character valdrá fox\n character = fox\n elif p2 == 'Maiz': #Si p2 es igual a maiz, character valdrá corn\n character = corn\n elif p2 == 'Ganzo': #Si p2 es igual a ganzo, character valdrá duck\n character = duck\n else: #Si no, character valdrá falmer\n character = farmer\n return (d, farmer, character) #La función regresa d, famer y character\n\n#Se creo la función Embark_characters, con los parámetros B, p1, p2\ndef Embark_characters(B, p1, p2):\n #Se hace uso de condicionales\n if p1 in B: #Si p1 esta en B\n B.remove(p1) #Será removido p1 del arreglo B \n if p2 in B: #si p2 esta en B\n B.remove(p2) #Será removido p2 del arreglo B\n\n#Se creo la función Disembark_characters, con los parámetros A, p1, p2 \ndef Disembark_characters(A, p1, p2):\n #Se hace uso de condicionales\n if p1 not in A: #si p1 no está en A\n A.append(p1) #p1 será agregado al arreglo A\n if p2 not in A: #si p2 no está en B\n A.append(p2) #p2 será agregado al arreglo B\n\n#Se creo la función HCR_anmacion, con el parámetro P \ndef HCR_animacion(P):\n #Se declaran las variables globales x, y, left, right, vel, Side_A, Side_B\n global x, y, left, right, vel\n global Side_A, Side_B\n\n clock = pygame.time.Clock()\n run = True\n move = 0\n while run:\n clock.tick(27)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n left = True\n right = False\n if move < len(P):\n direction, p1, p2 = get_characters(P[move], P[move + 1], P[move + 2])\n Embark_characters(Side_B, p1, p2)\n for step in range(65):\n x -= vel\n redrawGameWindow(direction, p1, p2)\n pygame.time.delay(70)\n move += 3\n Disembark_characters(Side_A, p1, p2)\n\n elif keys[pygame.K_RIGHT]:\n right = True\n left = False\n if move < len(P):\n direction, p1, p2 = get_characters(P[move], P[move + 1], P[move + 2])\n Embark_characters(Side_A, p1, p2)\n for step in range(65):\n x += vel\n redrawGameWindow(direction, p1, p2)\n pygame.time.delay(70)\n move += 3\n Disembark_characters(Side_B, p1, p2)\n else:\n redrawGameWindow ('Standby','Unknown', 'Unknown')\n \n\n pygame.quit()\n\n#Se creo la función Busca_solucion\ndef Busca_solucion():\n P = HCRfinal.HCR() #P valdrá el retorno de la función HCRfinal.HCR()\n while len(P) > 22: #mientras p mida más de 22, se corre el programa\n #while len(P) > 42:\n HCRfinal.reiniciar_sistema() #HCRfinal hará uso de reiniciar_sistema\n print ('\\nBuscando una mejor solución, Longitud del Path', len(P)) #se imprimirá 
un mensaje indicando que se buscará una mejor solución y cuanto mide p\n P = HCRfinal.HCR() #P valdrá el retorno de la función HCRfinal.HCR()\n print (P) #Se imprime P\n print (len(P)) #se imprime cuanto mide P\n print ('\\n =====> Solución encontrada:') #Se imprime el mensaje que se econtrlo la solucion\n return (P) #Se regresa P\n\n \n \nP = Busca_solucion() #P valdrá el retorno de la función Busca_solucion\nprint ('Aquí su animación') #Se imprime el mensaje \"Aquí su animación\"\n\npygame.init()\n\nwin = pygame.display.set_mode((500,500))\npygame.display.set_caption(\"How to Cross the River\")\n\nBoatRight = pygame.image.load('BoteRight.png')\nBoatLeft = pygame.image.load('BoteLeft.png')\nbg = pygame.image.load('seaL.png')\nchar = pygame.image.load('BoteRight.png')\nfox = pygame.image.load('fox.png')\ncorn = pygame.image.load('corn.png')\nduck = pygame.image.load('duck.png')\nfarmer = pygame.image.load('farmer.png')\nx = 10\ny = 425\nvel = 5\nleft = False\nright = False\n\nSide_A = [farmer, fox, duck, corn]\nSide_B = []\n\nHCR_animacion(P)\n\n\n\n\n", "repo_name": "ianego/snake", "sub_path": "Final.py", "file_name": "Final.py", "file_ext": "py", "file_size_in_byte": 5335, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.display.update", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 110, "usage_type": "call"}, {"api_name": "HCRfinal.HCR", "line_number": 114, "usage_type": "call"}, {"api_name": "HCRfinal.reiniciar_sistema", "line_number": 117, "usage_type": "call"}, {"api_name": "HCRfinal.HCR", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 135, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 136, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 137, "usage_type": "call"}, 
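The animation record above (its API index continues below) keeps re-running HCRfinal.HCR() until the returned path has at most 22 moves: a restart loop around a stochastic solver. A hedged sketch of that loop with a dummy solver standing in for HCR():

import random

def solve():  # stand-in for HCRfinal.HCR(): returns a random-length move list
    return ['move'] * random.randint(10, 60)

def busca_solucion(max_len=22):
    path = solve()
    while len(path) > max_len:
        # the record also calls HCRfinal.reiniciar_sistema() here to reset state
        path = solve()
    return path

print(len(busca_solucion()))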
{"api_name": "pygame.image", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 139, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 142, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 142, "usage_type": "attribute"}]} +{"seq_id": "39676108103", "text": "#!/usr/bin/python3\nimport cgi\nimport subprocess\nimport boto3\nimport os\nprint(\"content-type: text/html\")\nprint(\"Access-Control-Allow-Origin: *\")\nprint()\nform = cgi.FieldStorage()\nfileitem = form['filename']\nif fileitem.filename:\n # strip leading path from file name to avoid\n # directory traversal attacks\n fn = os.path.basename(fileitem.filename)\n open('/tmp/' + fn, 'wb').write(fileitem.file.read())\n message = 'The file \"' + fn + '\" was uploaded successfully'\n\nelse:\n message = 'No file was uploaded'\nprint(\"\\n\")\nprint(message)\n#print(\\n)\n#print(filename)\n#print('/tmp/' + fn)\n#print(/tmp/ + fn)\n\"\"\"import cgitb; cgitb.enable()\nform = cgi.FieldStorage()\n# Get filename here.\nfileitem = form['filename']\n# Test if the file was uploaded\nif fileitem.filename:\n # strip leading path from file name to avoid\n # directory traversal attacks\n fn = os.path.basename(fileitem.filename)\n open('/tmp/' + fn, 'wb').write(fileitem.file.read())\n message = 'The file \"' + fn + '\" was uploaded successfully'\n\nelse:\n message = 'No file was uploaded'\n\nprint \"\"\"\\\n#Content-Type: text/html\\n\n#\n#\n #
<p>
%s
</p>
\n#\n#\n\"\"\" % (message,)\n\nprint \"hello\"\nprint \"%s\" % (fileitem.filename)\nprint \"%s\" % (fn)\n\"\"\"\n\n\n\n#import boto3\n\n\nregion= \"us-west-1\"\nbucket = \"team12345\"\nupimage = \"file2.jpg\"\nmyphoto = '/tmp/' + fn\nprint(\"\\n\")\n#print(region)\n#print(myphoto)\n\ndef face_reader():\n try:\n s3 = boto3.resource('s3')\n s3.Bucket(bucket).upload_file(myphoto , upimage)\n #print(myphoto)\n rek = boto3.client('rekognition' , region )\n response = rek.detect_labels(\n Image={\n\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': upimage,\n }\n },\n MaxLabels=10,\n MinConfidence=50\n )\n\n # print(\"hello\")\n print(\"\\n\")\n print(\"DETECTED OBJECTS GIVEN BELOW:-\")\n for i in range(10):\n print(\"\\n\")\n print(response['Labels'][i]['Name'])\n if response['Labels'][i]['Name'] == \"Man\":\n smile = \"Your Gender is Male\"\n # output = subprocess.getoutput(\"Your Gender is Male\")\n # print(output)\n print(\"\\n\")\n print(smile)\n #voice(man)\n if response['Labels'][i]['Name'] == \"Woman\":\n smile = \"Your Gender is FeMale\"\n # print(\\n)\n print(\"\\n\")\n print(smile)\n #voice(man)\n #if response['Labels'][i]['Name'] != \"Chair\":\n # break\n if response['Labels'][i]['Name'] == None:\n smile = \"Objects Not Available\"\n print(\"\\n\")\n print(smile)\n \n print(\"\\n\")\n print(\"DETECTED FACE GESTURES GIVEN BELOW:-\")\n resfaces = rek.detect_faces(\n Image={\n\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': upimage,\n\n }\n },\n Attributes=['ALL'])\n\n\n if resfaces['FaceDetails'][0]['Smile']['Value'] == False:\n smile = \"PLease Smile You Look Like Sad\"\n #voice(smile)\n # print(\\n)\n print(\"\\n\")\n print(smile)\n if resfaces['FaceDetails'][0]['Eyeglasses']['Value'] == True:\n smile = \"Ohh I Think You Wear A Eyeglasses\"\n #voice(smile)\n print(\"\\n\")\n print(smile)\n if resfaces['FaceDetails'][0]['Sunglasses']['Value'] == True:\n smile = \"Ohh I Think You Wear A Sunglasses\"\n # voice(smile)\n print(\"\\n\")\n print(smile)\n if resfaces['FaceDetails'][0]['Beard']['Value'] == True:\n smile = \"You Have A Beard Please Shave Your Beard\"\n #voice(smile)\n print(\"\\n\")\n print(smile)\n if resfaces['FaceDetails'][0]['Mustache']['Value'] == True:\n smile = \"You Have also mustache\"\n # voice(smile)\n print(\"\\n\")\n print(smile)\n \n # voice(smile)\n if resfaces['FaceDetails'][0]['AgeRange']['Low'] > 0 :\n min_age=(resfaces['FaceDetails'][0]['AgeRange']['Low'])\n max_age=(resfaces['FaceDetails'][0]['AgeRange']['High'])\n smile = \"Your Age Is Between\" + str(min_age) + \"year to\" + str(max_age) + \"year\"\n # voice(age)\n print(\"\\n\")\n print(smile)\n if resfaces['FaceDetails'][0]['EyesOpen']['Value'] == True:\n smile = \"Your Eyes is Open\"\n print(\"\\n\")\n print(smile)\n # voice(smile)\n if resfaces['FaceDetails'][0]['MouthOpen']['Value'] == True:\n smile = \"Your Mouth is Open\"\n #voice(smile)\n print(\"\\n\")\n print(smile)\n for i in range(7):\n # print(resfaces['FaceDetails'][0]['Emotions'][i]['Confidence'])\n if (resfaces['FaceDetails'][0]['Emotions'][i]['Confidence']) > 50.0000 :\n emotion=(resfaces['FaceDetails'][0]['Emotions'][i]['Type'])\n smile = \"Your Face Emotion is\" + emotion\n #voice(emo)\n print(\"\\n\")\n print(smile)\n\n except Exception as error_msg:\n print(error_msg)\n\nface_reader()\n\n", "repo_name": "pra-cloud/Face-Reader-App-With-AWS-Python-And-Web-Development-", "sub_path": "cgi/way5.py", "file_name": "way5.py", "file_ext": "py", "file_size_in_byte": 5371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
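The CGI record whose code ends above drives everything off two Rekognition calls. A trimmed sketch of just those calls, assuming valid AWS credentials; the region, bucket, and key are the record's own placeholders, and indexing FaceDetails[0] assumes at least one face was detected:

import boto3

rek = boto3.client('rekognition', region_name='us-west-1')
image = {'S3Object': {'Bucket': 'team12345', 'Name': 'file2.jpg'}}

labels = rek.detect_labels(Image=image, MaxLabels=10, MinConfidence=50)
print([label['Name'] for label in labels['Labels']])

faces = rek.detect_faces(Image=image, Attributes=['ALL'])
detail = faces['FaceDetails'][0]  # assumes a face was found
print(detail['Smile']['Value'], detail['AgeRange']['Low'], detail['AgeRange']['High'])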
"dataset": "github-code", "pt": "61", "api": [{"api_name": "cgi.FieldStorage", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "boto3.resource", "line_number": 70, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "18100384886", "text": "#Steamlit_Stock\n\n\n#Republican\":0,\"Democrat\":1\n\nimport datetime\nimport streamlit as st\nimport pandas as pd\nimport altair as alt\nfrom util import train_test_time\n\n\nfrom datetime import datetime\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Ridge\n\nfrom sklearn.dummy import DummyRegressor\nimport numpy as np\nimport re\n\n\nPARTY_ENCODING={\"Republican\":0,\"Democratic\":1}\n# Drop down display for President and Senate and House and add the side bar\nwith st.sidebar:\n prez= st.radio(\"Please pick President's party\",(\"Democratic\",\"Republican\"))\n b_prez=PARTY_ENCODING[prez]\n house = st.radio(\"Please pick majority party for the House of Representatives\",(\"Democratic\",\"Republican\"))\n b_house=PARTY_ENCODING[house]\n senate=st.radio(\"Please majority party for the Senate\",(\"Democratic\",\"Republican\"))\n b_senate=PARTY_ENCODING[senate]\n\n#mortgage rate GDP and Fed funds rate and move to a side bar\nwith st.sidebar:\n mortgage=st.number_input(\"Please insert a 30 year mortgage\")\n st.write(\"the mortgage rate you used is\",mortgage)\n\n gdp=st.number_input(\"Please insert a GDP\")\n st.write(\"The GDP you are using is\",gdp)\n\n fed=st.number_input(\"Please insert a Fed Funds Rate\")\n st.write(\"The Fed Funds you are using is\",fed)\n num=st.number_input(\"Please input the number of days to predict on\")\n st.write(num)\n\n #trying to figure this part out won't work\nwith st.sidebar:\n d = st.date_input(\n \"When do you want to start\",\n datetime(2019, 7, 6,))\n \n\n\n#get data\ndef get_data():\n source=pd.read_csv(\"altformart.csv\",infer_datetime_format=True)\n source[\"Date\"]=pd.to_datetime(source[\"Date\"])\n return source\n\n\nsource=get_data()\n\n\n\nstart=pd.Timestamp(d)\nend=start+pd.DateOffset(months=1)\nnew_source=source[(source.Date > start)&(source.Date qtde:\n energia_semana.pop()\n agua_semana.pop()\n energia_feriado.pop()\n agua_feriado.pop()\n \n dados = genes - len(energia_semana)\n\n for i in range(dados):\n valores = sortearValor()\n\n energia_semana.append(valores)\n energia_feriado.append(valores)\n agua_semana.append(valores)\n agua_feriado.append(valores)\n \n listaComodos.append({\n 'energia_semana': energia_semana,\n 'energia_feriado': energia_feriado,\n 'agua_semana': agua_semana,\n 'agua_feriado': agua_feriado\n })\n pos += 1\n\ndef getPosMes(mes):\n meses = ['Janeiro','Fevereiro','Março','Abril','Maio','Junho','Julho','Agosto','Setembro','Outubro','Novembro','Dezembro']\n return meses.index(mes)\n\ndef gerarAnalise():\n global casa\n global meta\n global listaSemana\n global listaFinalSemana\n \n if casa:\n month = getPosMes(mes.mes) + 1\n clima = Clima.objects.filter(data__month=month)\n \n comodos = Comodo.objects.filter(casa=casa)\n for comodo in comodos:\n resultSemana = []\n resultFinalSemana = []\n resultados = ComodoValorY.objects.filter(comodo=comodo,data__month=month)\n for resultado in resultados: \n for item in clima:\n if resultado.data == item.data and resultado.hora == item.hora:\n if resultado.data.weekday() < 5:\n resultSemana.append({\n 
'energia': resultado.meta_energia,\n 'agua': resultado.meta_agua,\n 'temperatura': item.temperatura,\n 'umidade': item.umidade,\n 'vento': item.vento ,\n 'pressao': item.pressao,\n 'chuva': item.chuva\n })\n else:\n resultFinalSemana.append({\n 'energia': resultado.meta_energia,\n 'agua': resultado.meta_agua,\n 'temperatura': item.temperatura,\n 'umidade': item.umidade,\n 'vento': item.vento ,\n 'pressao': item.pressao,\n 'chuva': item.chuva\n })\n listaSemana.append(resultSemana)\n listaFinalSemana.append(resultFinalSemana)\n\ndef executarGenetico():\n pos = 0 #comodo\n perc = 0 #media de acerto de todos comodos\n geracao = 0 #vezes executadas\n\n ini = time.time()\n while pos < len(listaComodos):\n calcularAptidao(listaComodos[pos],listaSemana[pos],listaFinalSemana[pos])\n pos += 1\n\n perc = percentualMelhorGene(listaComodos)\n while perc < percParada and geracao < 100:\n if geracao % 20 != 0:\n x = .4 #mantem 40% da populacao\n else:\n x = .01 #mantem 1%\n\n pos = 0\n while pos < len(listaComodos):\n selecao(listaComodos[pos], x) \n cruzamento(listaComodos[pos])\n calcularAptidao(listaComodos[pos],listaSemana[pos],listaFinalSemana[pos]) \n pos += 1\n\n perc = percentualMelhorGene(listaComodos)\n geracao += 1\n print(\"Geraçao {} Tx. acerto {}\".format(geracao,perc))\n fim = time.time()\n print(\"Tempo {}\".format((fim-ini)))\n\n salvarResultados(listaComodos,perc, (fim-ini))\n\ndef calcularAptidao(comodo,listaSemana,listaFinalSemana):\n total = len(listaSemana)\n\n if comodo['energia_semana'][0]['acerto'] < 99:\n for item in comodo['energia_semana']:\n perc = acerto = 0\n for semana in listaSemana: \n resp = item['temperatura'] * semana['temperatura'] + item['umidade'] * semana['umidade'] + item['vento'] * semana['vento'] + item['pressao'] * semana['pressao'] + item['chuva'] * semana['chuva']\n if semana['energia'] == round(resp):\n acerto += 1\n perc = acerto * 100 / total\n item['acerto'] = perc\n \n if comodo['agua_semana'][0]['acerto'] < 99:\n for item in comodo['agua_semana']:\n perc = acerto = 0\n for semana in listaSemana: \n resp = item['temperatura'] * semana['temperatura'] + item['umidade'] * semana['umidade'] + item['vento'] * semana['vento'] + item['pressao'] * semana['pressao'] + item['chuva'] * semana['chuva']\n if semana['agua'] == round(resp):\n acerto += 1\n perc = acerto * 100 / total\n item['acerto'] = perc\n\n total = len(listaFinalSemana)\n\n if comodo['energia_feriado'][0]['acerto'] < 99:\n for item in comodo['energia_feriado']:\n perc = acerto = 0\n for final in listaFinalSemana: \n resp = item['temperatura'] * final['temperatura'] + item['umidade'] * final['umidade'] + item['vento'] * final['vento'] + item['pressao'] * final['pressao'] + item['chuva'] * final['chuva']\n if final['energia'] == round(resp):\n acerto += 1\n perc = acerto * 100 / total\n item['acerto'] = perc\n \n if comodo['agua_feriado'][0]['acerto'] < 99:\n for item in comodo['agua_feriado']:\n perc = acerto = 0\n for final in listaFinalSemana: \n resp = item['temperatura'] * final['temperatura'] + item['umidade'] * final['umidade'] + item['vento'] * final['vento'] + item['pressao'] * final['pressao'] + item['chuva'] * final['chuva']\n if final['agua'] == round(resp):\n acerto += 1\n perc = acerto * 100 / total\n item['acerto'] = perc\n\ndef ordenar(comodo):\n comodo['energia_semana'] = sorted(comodo['energia_semana'], key=lambda row: row['acerto'], reverse=True)\n comodo['agua_semana'] = sorted(comodo['agua_semana'], key=lambda row: row['acerto'], reverse=True)\n comodo['energia_feriado'] = 
sorted(comodo['energia_feriado'], key=lambda row: row['acerto'], reverse=True)\n comodo['agua_feriado'] = sorted(comodo['agua_feriado'], key=lambda row: row['acerto'], reverse=True)\n\ndef selecao(comodo,perc):\n global genes \n\n if comodo['energia_semana'][0]['acerto'] < 99 and comodo['agua_semana'][0]['acerto'] < 99 and comodo['energia_feriado'][0]['acerto'] < 99 and comodo['agua_feriado'][0]['acerto'] < 99:\n while len(comodo['energia_semana']) > genes * perc:\n comodo['energia_semana'].pop()\n comodo['agua_semana'].pop()\n comodo['energia_feriado'].pop()\n comodo['agua_feriado'].pop()\n else:\n if comodo['energia_semana'][0]['acerto'] < 99:\n while len(comodo['energia_semana']) > genes * perc:\n comodo['energia_semana'].pop()\n\n if comodo['agua_semana'][0]['acerto'] < 99:\n while len(comodo['agua_semana']) > genes * perc:\n comodo['agua_semana'].pop()\n\n if comodo['energia_feriado'][0]['acerto'] < 99:\n while len(comodo['energia_feriado']) > genes * perc:\n comodo['energia_feriado'].pop()\n \n if comodo['agua_feriado'][0]['acerto'] < 99:\n while len(comodo['agua_feriado']) > genes * perc:\n comodo['agua_feriado'].pop()\n \ndef cruzamento(comodo):\n global genes\n\n #Energia semana\n cromosomos = []\n if comodo['energia_semana'][0]['acerto'] < 99: \n cromosomos = comodo['energia_semana'].copy()\n while len(comodo['energia_semana']) > 0:\n pos = random.randint(0,len(comodo['energia_semana'])-1)\n gene1 = comodo['energia_semana'].pop(pos)\n\n pos = random.randint(0,len(comodo['energia_semana'])-1)\n gene2 = comodo['energia_semana'].pop(pos)\n\n filho1 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .6) + (gene2['temperatura'] * .4),\n 'umidade': (gene1['umidade'] * .6) + (gene2['umidade'] * .4),\n 'vento': (gene1['vento'] * .6) + (gene2['vento'] * .4),\n 'pressao': (gene1['pressao'] * .6) + (gene2['pressao'] * .4),\n 'chuva': (gene1['chuva'] * .6) + (gene2['chuva'] * .4)}\n\n filho2 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .4) + (gene2['temperatura'] * .6),\n 'umidade': (gene1['umidade'] * .4) + (gene2['umidade'] * .6),\n 'vento': (gene1['vento'] * .4) + (gene2['vento'] * .6),\n 'pressao': (gene1['pressao'] * .4) + (gene2['pressao'] * .6),\n 'chuva': (gene1['chuva'] * .4) + (gene2['chuva'] * .6)}\n \n chance = random.randint(0,1000)\n if chance == 999:\n mutacao(filho1)\n elif chance == 998:\n mutacao(filho2)\n\n cromosomos.append(filho1)\n cromosomos.append(filho2)\n\n comodo['energia_semana'] = cromosomos.copy()\n\n #agua semana\n if comodo['agua_semana'][0]['acerto'] < 99:\n cromosomos.clear()\n cromosomos = comodo['agua_semana'].copy()\n while len(comodo['agua_semana']) > 0:\n pos = random.randint(0,len(comodo['agua_semana'])-1)\n gene1 = comodo['agua_semana'].pop(pos)\n\n pos = random.randint(0,len(comodo['agua_semana'])-1)\n gene2 = comodo['agua_semana'].pop(pos)\n\n filho1 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .6) + (gene2['temperatura'] * .4),\n 'umidade': (gene1['umidade'] * .6) + (gene2['umidade'] * .4),\n 'vento': (gene1['vento'] * .6) + (gene2['vento'] * .4),\n 'pressao': (gene1['pressao'] * .6) + (gene2['pressao'] * .4),\n 'chuva': (gene1['chuva'] * .6) + (gene2['chuva'] * .4)}\n\n filho2 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .4) + (gene2['temperatura'] * .6),\n 'umidade': (gene1['umidade'] * .4) + (gene2['umidade'] * .6),\n 'vento': (gene1['vento'] * .4) + (gene2['vento'] * .6),\n 'pressao': (gene1['pressao'] * .4) + (gene2['pressao'] * .6),\n 'chuva': (gene1['chuva'] * .4) + (gene2['chuva'] * .6)}\n\n 
chance = random.randint(0,1000)\n if chance == 999:\n mutacao(filho1)\n elif chance == 998:\n mutacao(filho2)\n\n\n cromosomos.append(filho1)\n cromosomos.append(filho2)\n\n comodo['agua_semana'] = cromosomos.copy()\n\n #Energia final de semana\n if comodo['energia_feriado'][0]['acerto'] < 99:\n cromosomos.clear() \n cromosomos = comodo['energia_feriado'].copy()\n while len(comodo['energia_feriado']) > 0:\n pos = random.randint(0,len(comodo['energia_feriado'])-1)\n gene1 = comodo['energia_feriado'].pop(pos)\n\n pos = random.randint(0,len(comodo['energia_feriado'])-1)\n gene2 = comodo['energia_feriado'].pop(pos)\n\n filho1 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .6) + (gene2['temperatura'] * .4),\n 'umidade': (gene1['umidade'] * .6) + (gene2['umidade'] * .4),\n 'vento': (gene1['vento'] * .6) + (gene2['vento'] * .4),\n 'pressao': (gene1['pressao'] * .6) + (gene2['pressao'] * .4),\n 'chuva': (gene1['chuva'] * .6) + (gene2['chuva'] * .4)}\n\n filho2 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .4) + (gene2['temperatura'] * .6),\n 'umidade': (gene1['umidade'] * .4) + (gene2['umidade'] * .6),\n 'vento': (gene1['vento'] * .4) + (gene2['vento'] * .6),\n 'pressao': (gene1['pressao'] * .4) + (gene2['pressao'] * .6),\n 'chuva': (gene1['chuva'] * .4) + (gene2['chuva'] * .6)}\n\n chance = random.randint(0,1000)\n if chance == 999:\n mutacao(filho1)\n elif chance == 998:\n mutacao(filho2)\n\n cromosomos.append(filho1)\n cromosomos.append(filho2)\n\n comodo['energia_feriado'] = cromosomos.copy()\n\n #Agua final de semana\n if comodo['agua_feriado'][0]['acerto'] < 99:\n cromosomos.clear()\n cromosomos = comodo['agua_feriado'].copy()\n while len(comodo['agua_feriado']) > 0:\n pos = random.randint(0,len(comodo['agua_feriado'])-1)\n gene1 = comodo['agua_feriado'].pop(pos)\n\n pos = random.randint(0,len(comodo['agua_feriado'])-1)\n gene2 = comodo['agua_feriado'].pop(pos)\n\n filho1 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .6) + (gene2['temperatura'] * .4),\n 'umidade': (gene1['umidade'] * .6) + (gene2['umidade'] * .4),\n 'vento': (gene1['vento'] * .6) + (gene2['vento'] * .4),\n 'pressao': (gene1['pressao'] * .6) + (gene2['pressao'] * .4),\n 'chuva': (gene1['chuva'] * .6) + (gene2['chuva'] * .4)}\n\n filho2 = {'acerto': 0, \n 'temperatura': (gene1['temperatura'] * .4) + (gene2['temperatura'] * .6),\n 'umidade': (gene1['umidade'] * .4) + (gene2['umidade'] * .6),\n 'vento': (gene1['vento'] * .4) + (gene2['vento'] * .6),\n 'pressao': (gene1['pressao'] * .4) + (gene2['pressao'] * .6),\n 'chuva': (gene1['chuva'] * .4) + (gene2['chuva'] * .6)}\n\n chance = random.randint(0,1000)\n if chance == 999:\n mutacao(filho1)\n elif chance == 998:\n mutacao(filho2)\n\n cromosomos.append(filho1)\n cromosomos.append(filho2)\n\n comodo['agua_feriado'] = cromosomos.copy()\n\n completarPopulacao(comodo)\n\ndef completarPopulacao(comodo):\n global genes\n\n while len(comodo['energia_semana']) < genes and comodo['energia_semana'][0]['acerto'] < 99:\n valores = sortearValor()\n comodo['energia_semana'].append(valores)\n \n while len(comodo['agua_semana']) < genes and comodo['agua_semana'][0]['acerto'] < 99:\n valores = sortearValor()\n comodo['agua_semana'].append(valores)\n\n while len(comodo['energia_feriado']) < genes and comodo['energia_feriado'][0]['acerto'] < 99:\n valores = sortearValor()\n comodo['energia_feriado'].append(valores)\n \n while len(comodo['agua_feriado']) < genes and comodo['agua_feriado'] [0]['acerto'] < 99:\n valores = sortearValor()\n 
comodo['agua_feriado'].append(valores)\n\ndef percentualMelhorGene(listaComodos):\n total = 0\n pos = 0\n while pos < len(listaComodos):\n comodo = listaComodos[pos]\n ordenar(comodo)\n\n total += float(comodo['energia_semana'][0]['acerto'])\n total += float(comodo['agua_semana'][0]['acerto'])\n total += float(comodo['energia_feriado'][0]['acerto'])\n total += float(comodo['agua_feriado'][0]['acerto'])\n \n pos += 1\n \n return total / (len(listaComodos)*4)\n \ndef mutacao(filho):\n gene = random.randint(0,5)\n tipo = random.randint(0,10)\n\n\n if gene == 0:\n if tipo != 0:\n filho['temperatura'] = filho['temperatura'] + random.random()\n else:\n filho['temperatura'] = filho['temperatura'] - random.random()\n elif gene == 1:\n if tipo != 0:\n filho['umidade'] = filho['umidade'] + random.random()\n else:\n filho['umidade'] = filho['umidade'] - random.random()\n elif gene == 2:\n if tipo != 0:\n filho['vento'] = filho['vento'] + random.random()\n else:\n filho['vento'] = filho['vento'] - random.random()\n elif gene == 3:\n if tipo != 0:\n filho['pressao'] = filho['pressao'] + random.random()\n else:\n filho['pressao'] = filho['pressao'] - random.random()\n else:\n if tipo != 0:\n filho['chuva'] = filho['chuva'] + random.random()\n else:\n filho['chuva'] = filho['chuva'] - random.random()\n \ndef salvarResultados(listaComodos,perc,tempo):\n global meta\n global casa\n\n novo = GrupoCoeficiente.objects.create(\n meta_treino = meta,\n gerador = \"Algoritmo genetico\",\n precisao = perc,\n tempo_treino= tempo\n )\n \n comodos = Comodo.objects.filter(casa=casa)\n pos = 0\n for comodo in comodos:\n dados = listaComodos[pos]\n\n Coeficiente.objects.create(\n comodo = comodo,\n grupo = novo,\n precisao = dados['energia_semana'][0]['acerto'],\n energia =True,\n semana = True,\n temperatura = dados['energia_semana'][0]['temperatura'],\n umidade = dados['energia_semana'][0]['umidade'],\n vento = dados['energia_semana'][0]['vento'],\n pressao = dados['energia_semana'][0]['pressao'],\n chuva = dados['energia_semana'][0]['chuva']\n )\n\n Coeficiente.objects.create(\n comodo = comodo,\n grupo = novo,\n precisao = dados['agua_semana'][0]['acerto'],\n energia = False,\n semana = True,\n temperatura = dados['agua_semana'][0]['temperatura'],\n umidade = dados['agua_semana'][0]['umidade'],\n vento = dados['agua_semana'][0]['vento'],\n pressao = dados['agua_semana'][0]['pressao'],\n chuva = dados['agua_semana'][0]['chuva']\n )\n\n Coeficiente.objects.create(\n comodo = comodo,\n grupo = novo,\n precisao = dados['energia_feriado'][0]['acerto'],\n energia =True,\n semana = False,\n temperatura = dados['energia_feriado'][0]['temperatura'],\n umidade = dados['energia_feriado'][0]['umidade'],\n vento = dados['energia_feriado'][0]['vento'],\n pressao = dados['energia_feriado'][0]['pressao'],\n chuva = dados['energia_feriado'][0]['chuva']\n )\n\n Coeficiente.objects.create(\n comodo = comodo,\n grupo = novo,\n precisao = dados['agua_feriado'][0]['acerto'],\n energia = False,\n semana = False,\n temperatura = dados['agua_feriado'][0]['temperatura'],\n umidade = dados['agua_feriado'][0]['umidade'],\n vento = dados['agua_feriado'][0]['vento'],\n pressao = dados['agua_feriado'][0]['pressao'],\n chuva = dados['agua_feriado'][0]['chuva']\n )\n pos += 1\n\n", "repo_name": "reduPKR/projeto_casa_python", "sub_path": "projeto_casa/core/pages/genetico/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 25563, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", 
"pt": "61", "api": [{"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 86, "usage_type": "call"}, {"api_name": "random.random", "line_number": 90, "usage_type": "call"}, {"api_name": "random.random", "line_number": 92, "usage_type": "call"}, {"api_name": "random.random", "line_number": 94, "usage_type": "call"}, {"api_name": "random.random", "line_number": 97, "usage_type": "call"}, {"api_name": "random.random", "line_number": 99, "usage_type": "call"}, {"api_name": "random.random", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 256, "usage_type": "call"}, {"api_name": "time.time", "line_number": 278, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 368, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 371, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 388, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 404, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 407, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 424, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 441, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 444, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 461, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 477, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 480, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 497, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 546, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 547, "usage_type": "call"}, {"api_name": "random.random", "line_number": 552, "usage_type": "call"}, {"api_name": "random.random", "line_number": 554, "usage_type": "call"}, {"api_name": "random.random", "line_number": 557, "usage_type": "call"}, {"api_name": "random.random", "line_number": 559, "usage_type": "call"}, {"api_name": "random.random", "line_number": 562, "usage_type": "call"}, {"api_name": "random.random", "line_number": 564, "usage_type": "call"}, {"api_name": "random.random", "line_number": 567, "usage_type": "call"}, {"api_name": "random.random", "line_number": 569, "usage_type": "call"}, {"api_name": "random.random", "line_number": 572, "usage_type": "call"}, {"api_name": "random.random", "line_number": 574, "usage_type": "call"}]} +{"seq_id": "7955434556", "text": "from __future__ import print_function\n\nimport keras\n\nfrom keras import layers\nfrom keras.layers import Input\nfrom keras.layers import Dense,Conv2D,MaxPooling2D,ZeroPadding2D,AveragePooling2D\nfrom keras.layers import Activation,BatchNormalization,Flatten\nfrom keras.layers import Lambda, Reshape, Multiply, Concatenate\nfrom keras.models import Model\n\nfrom keras.preprocessing import image\nimport keras.backend as K\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import decode_predictions\nfrom keras.applications.imagenet_utils import preprocess_input\n\nfrom PIL import Image\nfrom utils.utils import get_heatmap_mask\nimport numpy as np\nimport tensorflow as tf\n\n\ndef identity_block(input_tensor, kernel_size, filters, 
stage, block):\n\n filters1, filters2, filters3 = filters\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size,padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = Activation('relu')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), strides=strides,\n name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size, padding='same',\n name=conv_name_base + '2b')(x)\n x = BatchNormalization(name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(name=bn_name_base + '2c')(x)\n\n shortcut = Conv2D(filters3, (1, 1), strides=strides,\n name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n x = Activation('relu')(x)\n return x\n\n\ndef ResNet50(input_shape=[224, 224, 3], classes=1000):\n\n img_input = Input(shape=input_shape)\n\n x_inp = ZeroPadding2D((3, 3))(img_input)\n x_inp = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x_inp)\n x_inp = BatchNormalization(name='bn_conv1')(x_inp)\n x_inp = Activation('relu')(x_inp)\n x_inp = MaxPooling2D((3, 3), strides=(2, 2))(x_inp)\n\n # channels split\n img_mask = Lambda(lambda x: x[:, :, :, 0])(img_input)\n img_origin = Lambda(lambda x: x[:, :, :, 1])(img_input)\n img_mask_exp = Lambda(lambda x: K.expand_dims(x, axis=-1))(img_mask)\n img_origin_exp = Lambda(lambda x: K.expand_dims(x, axis=-1))(img_origin)\n\n img_mask_to_conv = ZeroPadding2D((1, 1))(img_mask_exp)\n img_mask_to_conv = Conv2D(32, (3, 3), strides=(2, 2), name='conv1_a')(img_mask_to_conv)\n img_mask_to_conv = BatchNormalization(name='bn_xmask')(img_mask_to_conv)\n img_mask_to_conv = Activation('relu')(img_mask_to_conv)\n\n img_origin_to_conv = ZeroPadding2D((1, 1))(img_origin_exp)\n img_origin_to_conv = Conv2D(32, (3, 3), strides=(2, 2), name='conv1_b')(img_origin_to_conv)\n img_origin_to_conv = BatchNormalization(name='bn_xoriginal')(img_origin_to_conv)\n img_origin_to_conv = Activation('relu')(img_origin_to_conv)\n\n # fusion\n x = Concatenate(axis=-1)([img_mask_to_conv, img_origin_to_conv])\n\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(128, (3, 3), strides=(1, 1), name='conv_new')(x)\n x = BatchNormalization(name='bn_new')(x)\n x = Activation('relu')(x)\n\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = Concatenate(axis=-1)([x, x_inp])\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 
128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n x = AveragePooling2D((7, 7), name='avg_pool')(x)\n\n x = Flatten()(x)\n x = Dense(classes, activation='softmax', name='fc1000')(x)\n model = Model(img_input, x, name='resnet50')\n\n return model\n\n\nif __name__ == '__main__':\n model = ResNet50()\n\n\n\n", "repo_name": "liyuatbjut/OCTA-Analysis", "sub_path": "nets/resnet_cls.py", "file_name": "resnet_cls.py", "file_ext": "py", "file_size_in_byte": 5399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.layers.Conv2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 42, "usage_type": "name"}, {"api_name": "keras.layers.Activation", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 71, "usage_type": "name"}, {"api_name": "keras.layers.Activation", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 83, 
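The ResNet50 variant whose code ends above (its API index continues below) splits the input into a mask stream and an original-image stream, runs a small conv stem on each, and concatenates them; note the record declares a 3-channel input but only ever slices channels 0 and 1. A minimal runnable sketch of that fusion stem under the same Keras API:

from keras.layers import Input, Lambda, Conv2D, Concatenate
from keras.models import Model
import keras.backend as K

inp = Input(shape=(224, 224, 2))                       # one channel per stream
mask = Lambda(lambda x: K.expand_dims(x[:, :, :, 0], axis=-1))(inp)
orig = Lambda(lambda x: K.expand_dims(x[:, :, :, 1], axis=-1))(inp)
mask = Conv2D(32, 3, strides=2, padding='same', activation='relu')(mask)
orig = Conv2D(32, 3, strides=2, padding='same', activation='relu')(orig)
fused = Concatenate(axis=-1)([mask, orig])             # channel-wise fusion
model = Model(inp, fused)
model.summary()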
"usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.backend.expand_dims", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 89, "usage_type": "name"}, {"api_name": "keras.layers.Lambda", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.backend.expand_dims", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 90, "usage_type": "name"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 133, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 135, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "19199252443", "text": "import networkx as nx\n\ndef calc_cost_subs(G,path):\n\tcost = 0.0\n\ti = 0\n\twhile i < len(path)-1:\n\t\tcost += G[path[i]][path[i+1]]['weight']\n\t\ti = i+1\n\treturn cost\n\nG = nx.read_edgelist(\"tsp.txt\")\nk = G.number_of_nodes()\n\nwith open(\"model.txt\", \"r\") as file:\n\tstring = file.readlines()\n\ncheck = string[0].strip()\npath = []\nfor i in range(k+1):\n\tpath.append('0')\nfor i in range(2,len(string)-1):\n\tif \"Bool\" in string[i].strip():\n\t\tif \"true\" in string[i+1].strip():\n\t\t\tx = string[i].strip()\n\t\t\ty = x[x.find(\"v\")+1:x.find(\"_\")]\n\t\t\tz = x[x.find(\"_\")+1:x.find(\" () \")]\n\t\t\tpath[int(z)] = str(y)\n\ncost = calc_cost_subs(G,path)\n\nwith open(\"path.txt\", \"w\") as file:\n\tfile.write(str(path)+\"\\n\")\n\tfile.write(str(cost))\n", "repo_name": "dassarthak18/tsp_sat", "sub_path": "src/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "networkx.read_edgelist", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "11967408159", "text": "# -*- coding: utf-8 
-*-\n\n\n\"\"\"implementation for preprocessing pep725 data\"\"\"\n\nimport argparse\nimport re, uuid\nimport os\nimport sys\nimport pandas as pd\nsys.path.append('../')\nimport config\n\nPHENOPHASE_DESCRIPTIONS_FILE = os.path.join(os.path.dirname(__file__), 'phenophase_descriptions.csv')\nFILE_PREFIX = \"pep725_\"\nCOLUMNS_MAP = {\n 'species': 'scientific_name',\n 'day': 'day_of_year',\n 'lat': 'latitude',\n 'lon': 'longitude',\n 'defined_by': 'phenophase_name'\n}\nFILES = {\n 'data': FILE_PREFIX + 'data.csv',\n 'genus': FILE_PREFIX + 'genus.csv',\n 'species': FILE_PREFIX + 'species.csv',\n 'stations': FILE_PREFIX + 'stations.csv',\n 'phase': FILE_PREFIX + 'phase.csv',\n}\nPROJECT = 'pep725'\nROOT_PATH = os.path.join(os.path.dirname(__file__), '../../')\nINPUT_DIR = os.path.join(ROOT_PATH,'data', PROJECT, 'input')\nOUTPUT_DIR = os.path.join(ROOT_PATH, 'data', PROJECT, 'processed')\nOUTPUT_FILE = os.path.join(OUTPUT_DIR, 'data.csv')\n\n\nclass PreProcessor():\n def main(self):\n parser = argparse.ArgumentParser(description='PEP725 Data Pre-Processor')\n parser.add_argument('chunk_size', help='the chunk size to use', type=int)\n\n args = parser.parse_args()\n self.chunk_size = args.chunk_size\n self.run()\n\n def run(self):\n self.frames = {\n 'genus': pd.read_csv(os.path.join(INPUT_DIR,FILES['genus']), sep=';', header=0, usecols=['genus_id', 'genus'],\n skipinitialspace=True,dtype='object'),\n 'species': pd.read_csv(os.path.join(INPUT_DIR,FILES['species']), sep=';', header=0, skipinitialspace=True,\n usecols=['species_id', 'species'],dtype='object'),\n 'stations': pd.read_csv(os.path.join(INPUT_DIR,FILES['stations']), sep=';', header=0, skipinitialspace=True,\n usecols=['s_id', 'lat', 'lon'],dtype='object'), # , 'alt', 'name']),\n 'phase': pd.read_csv(os.path.join(INPUT_DIR,FILES['phase']), sep=';', header=0,\n usecols=['phase_id', 'description'], skipinitialspace=True,dtype='object'),\n 'phenophase_descriptions': pd.read_csv(PHENOPHASE_DESCRIPTIONS_FILE, header=0, skipinitialspace=True,dtype='object')\n }\n\n chunk_size = 100000\n\n data = pd.read_csv(os.path.join(INPUT_DIR,FILES['data']), sep=';', header=0,\n usecols=['s_id', 'genus_id', 'species_id', 'phase_id', 'year', 'day'],\n skipinitialspace=True,dtype='object', chunksize= chunk_size)\n\n open(OUTPUT_FILE, 'w').close()\n\n for chunk in data:\n self._transform_data(chunk).to_csv(\n OUTPUT_FILE, \n columns=config._parse_headers(self), \n mode='a', \n header=True, \n index=False)\n\n\n def _transform_data(self, data):\n joined_data = data \\\n .merge(self.frames['species'], left_on='species_id', right_on='species_id', how='left') \\\n .merge(self.frames['genus'], left_on='genus_id', right_on='genus_id', how='left') \\\n .merge(self.frames['stations'], left_on='s_id', right_on='s_id', how='left') \\\n .merge(self.frames['phase'], left_on='phase_id', right_on='phase_id', how='left') \\\n .merge(self.frames['phenophase_descriptions'], left_on='description', right_on='field', how='left')\n\n\n joined_data.fillna(\"\", inplace=True) # replace all null values\n\n joined_data = self._filter_data(joined_data)\n\n joined_data['record_id'] = joined_data.apply(lambda x: uuid.uuid4(), axis=1)\n joined_data['specific_epithet'] = joined_data.apply(\n lambda row: re.sub('^%s' % row['genus'], \"\", row['species']).strip(), axis=1)\n joined_data['source'] = 'PEP725'\n joined_data['basis_of_record'] = 'HumanObservation'\n\n joined_data['individualID'] = ''\n joined_data['sub_source'] = ''\n \n joined_data = joined_data.rename(columns=COLUMNS_MAP)\n\n 
return joined_data\n\n def _filter_data(self, data):\n # we want to drop all data with a description in phenohase_descriptions.csv which is missing a defined_by\n descriptions = self.frames['phenophase_descriptions']\n to_exclude = descriptions[descriptions['defined_by'].isnull()]['field']\n return data[~data['description'].isin(to_exclude)]\n\nif __name__ == '__main__':\n PreProcessor().main()\n", "repo_name": "biocodellc/ppo-data-pipeline", "sub_path": "projects/pep725/data_preprocessor.py", "file_name": "data_preprocessor.py", "file_ext": "py", "file_size_in_byte": 4474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config._parse_headers", "line_number": 70, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 89, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "12930046178", "text": "import requests\nimport bs4\n\n\ndef search_news(query, count=10):\n \"\"\"\n search news by query from kun.uz\n \"\"\"\n news = []\n url = f'https://kun.uz/news/search?q={query}'\n data = requests.get(url)\n soup = bs4.BeautifulSoup(data.text, 'html.parser')\n news_list = soup.select(\".news\")\n\n if not news_list:\n return []\n\n for i in range(count):\n 
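Two quirks in the PEP725 preprocessor above: the parsed chunk_size argument is shadowed by a hard-coded local chunk_size = 100000, and every chunk is appended with header=True, so the header row repeats once per chunk in the output. A hedged sketch of the usual fix, writing the header only for the first chunk; input.csv and out.csv are placeholder paths:

import pandas as pd

first = True
for chunk in pd.read_csv('input.csv', chunksize=100_000):
    chunk.to_csv('out.csv', mode='a', header=first, index=False)
    first = False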
try:\n title = news_list[i].select_one('.news__title').text.strip()\n date = news_list[i].select_one('.news-meta span').text.strip()\n link = news_list[i].select_one('.news__title').get('href')\n except IndexError:\n break\n\n news.append({'title': title, 'date': date, 'link': link})\n\n return news\n", "repo_name": "mirmakhamat/UIC-exam", "sub_path": "news_bot/kunuz.py", "file_name": "kunuz.py", "file_ext": "py", "file_size_in_byte": 731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "38285480324", "text": "import pandas as pd\nfrom ensemble_detectors.ensemble_shared_methods import shared_methods\nimport logging\n\nlogging.basicConfig(filename=\"app_logs.log\",\n format='%(asctime)s %(message)s',\n filemode='a')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nclass moving_average_detection:\n \"\"\"Methods for performing moving average detection\"\"\"\n \n def get_average(arr):\n if len(arr)==0:\n logger.warning('Empty list passed')\n return 0\n total = 0\n count = 0\n for num in arr:\n total = total + num\n count += 1\n return total/count\n\n\n def get_moving_average_coordinates(average_interval, data_points):\n \"\"\"Return the coordinates of the average based on window of last data points\"\"\"\n points_x = data_points['points_x']\n points_y = data_points['points_y']\n average_point_y = []\n average_point_x = []\n i = 0\n while (i < (len(points_y))):\n previous_points = []\n j = 0\n while (j < average_interval):\n if (i-j>0):\n previous_points.append(points_y[i-j])\n j += 1\n if len(previous_points)>0:\n average_point_y.append(moving_average_detection.get_average(previous_points))\n average_point_x.append(points_x[i])\n i = i + 1\n return pd.DataFrame({'points_average_x': average_point_x,'points_average_y': average_point_y})\n\n\n def detect_average_outliers(threshold, average_points, data_points):\n \"\"\"Return the coordinates of the outliers\"\"\"\n detected_ouliters_x = []\n detected_ouliters_y = []\n average_points_x = average_points['points_average_x']\n average_points_y = average_points['points_average_y']\n points_x = data_points['points_x']\n points_y = data_points['points_y']\n bound_mult = 3\n bound = (shared_methods.find_threshold(points_y)*bound_mult)\n\n i = 0\n while i < len(average_points_x):\n if ((points_y[i] < (average_points_y[i]-int(bound))) or (points_y[i] > (average_points_y[i]+int(bound)))):\n detected_ouliters_x.append(points_x[i])\n detected_ouliters_y.append(points_y[i])\n i += 1\n \n return pd.DataFrame({'timestamp': detected_ouliters_x,'data': detected_ouliters_y})\n\n\n def detect_average_outliers_labelled_prediction(threshold, average_points, data_points):\n \"\"\"Return the coordinates of the outliers with confidence score\"\"\"\n predictions_x = []\n predictions_y = []\n confidence = []\n average_points_x = average_points['points_average_x']\n average_points_y = average_points['points_average_y']\n points_x = data_points['points_x']\n points_y = data_points['points_y']\n bound_mult = 3\n bound = (shared_methods.find_threshold(points_y)*bound_mult)\n i = 0\n while i < len(average_points_x):\n predictions_x.append(points_x[i])\n predictions_y.append(points_y[i])\n if (shared_methods.is_data_outside_bounds(points_y[i], average_points_y[i], int(bound))):\n confidence.append(-1 * 
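One catch in the kun.uz scraper above: select_one returns None when a selector misses, and calling .text on None raises AttributeError, which the except IndexError guard never catches (it only covers running past news_list). A hedged variant that slices the card list and checks the selectors explicitly; the selectors are copied from the record and may have changed since:

import requests
import bs4

def search_news(query, count=10):
    soup = bs4.BeautifulSoup(
        requests.get(f'https://kun.uz/news/search?q={query}').text, 'html.parser')
    news = []
    for card in soup.select('.news')[:count]:
        title = card.select_one('.news__title')
        date = card.select_one('.news-meta span')
        if title is None or date is None:
            continue                      # skip cards that miss a selector
        news.append({'title': title.text.strip(),
                     'date': date.text.strip(),
                     'link': title.get('href')})
    return news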
shared_methods.calculate_confidence_outlier(points_y[i], average_points_y[i], bound))\n elif (points_y[i] > average_points_y[i]):\n confidence.append(((average_points_y[i]+int(bound)) - points_y[i])/bound)\n else:\n confidence.append((points_y[i] - (average_points_y[i]-int(bound)))/bound)\n i += 1\n return pd.DataFrame({'timestamp': predictions_x,'data': predictions_y,'confidence':confidence})\n\n\n def real_time_prediction(previous_data_values, next_data_value):\n \"\"\"Return confidence of next data value using moving average\"\"\"\n confidence = 0\n # get last 10 items in previous data\n i = len(previous_data_values)-2\n if (i <= 15):\n return confidence\n temp = []\n while (i > len(previous_data_values)-12):\n temp.append(previous_data_values[i])\n i -= 1\n previous_data_values = temp\n \n # get threshold\n threshold = shared_methods.find_threshold(previous_data_values)\n\n # get bound\n bound_mult = 3\n bound = (threshold*bound_mult)\n\n # next data within bounds?\n # calculate confidence\n average = moving_average_detection.get_average(previous_data_values)\n\n if (shared_methods.is_data_outside_bounds(next_data_value, average, bound)):\n confidence = -1 * shared_methods.calculate_confidence_outlier(next_data_value, average, bound)\n elif (next_data_value > average):\n confidence = (((average+int(bound)) - next_data_value)/bound)\n else:\n confidence = ((next_data_value - (average-int(bound)))/bound)\n\n # return conf\n return (confidence)", "repo_name": "Liam-Reid-2000/outlier-detection-in-virtual-machines", "sub_path": "ensemble_detectors/moving_average_detection.py", "file_name": "moving_average_detection.py", "file_ext": "py", "file_size_in_byte": 4983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.find_threshold", "line_number": 56, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 56, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.find_threshold", "line_number": 78, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 78, "usage_type": "name"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.is_data_outside_bounds", "line_number": 83, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 83, "usage_type": "name"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.calculate_confidence_outlier", "line_number": 84, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 84, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 90, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.find_threshold", "line_number": 107, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 107, "usage_type": "name"}, {"api_name": 
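A minimal standalone sketch of the windowed moving-average outlier test implemented by moving_average_detection above; the fixed bound here is an assumption standing in for shared_methods.find_threshold, whose implementation is not shown:

# Toy re-creation of the moving-average outlier test: each point is
# compared against the mean of the last `window` points (itself included).
import pandas as pd

def moving_average_outliers(xs, ys, window=3, bound=3.0):
    outliers = []
    for i in range(len(ys)):
        lo = max(0, i - window + 1)
        avg = sum(ys[lo:i + 1]) / (i + 1 - lo)
        if ys[i] < avg - bound or ys[i] > avg + bound:
            outliers.append((xs[i], ys[i]))
    return pd.DataFrame(outliers, columns=['timestamp', 'data'])

print(moving_average_outliers(list(range(8)), [1, 1, 1, 9, 1, 1, 1, 1]))
# only the spike at x=3 exceeds average +/- bound and is reported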
"ensemble_detectors.ensemble_shared_methods.shared_methods.is_data_outside_bounds", "line_number": 117, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 117, "usage_type": "name"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods.calculate_confidence_outlier", "line_number": 118, "usage_type": "call"}, {"api_name": "ensemble_detectors.ensemble_shared_methods.shared_methods", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "4442141159", "text": "from tqdm import tqdm\nimport json\nfrom typing import List, Dict\nimport time\n\nfrom transformers import GPT2Tokenizer\nfrom transformers import logging as trfms_log\n\ntrfms_log.set_verbosity_error()\n\nfrom gpt4all import GPT4All\nfrom openai import OpenAI\n\n\nclass BaseLLM:\n def __init__(self, config: Dict, verbose: bool):\n self.config = config\n self.verbose = verbose\n\n def process_with_llm(self, text: str, question: str) -> str:\n \"\"\"Process the text with the LLM for a given question and return a summary.\n\n We split the text into several chunks (that each will fit in the context length\n of the LLM). Then, once we have an answer based on each chunk, we summarize\n all the individual answers into a final answer for the whole text.\"\"\"\n\n chunks = self.chunk_text(text)\n answers = []\n\n with tqdm(chunks, desc=\"Analyzing Chunks\", leave=False) as pbar:\n for chunk_number, chunk in enumerate(pbar, 1):\n answer = self.get_answer(question, chunk)\n\n if self.verbose:\n tqdm.write(f\"Answer for Chunk {chunk_number}: {answer}\\n\")\n\n # Pause in order to prevent rate-limiting\n time.sleep(self.config[\"delay\"])\n\n answers.append(answer)\n\n return self.summarize_answers(question, answers)\n\n def chunk_text(self, text: str) -> List[str]:\n \"\"\"Split the text into chunks based on context_length.\n\n Args:\n text (str): The text to be split into chunks.\n\n Returns:\n List[str]: List of text chunks.\n \"\"\"\n if not self.config.get(\"context_length\", None):\n raise ValueError(\"Context length has not been set.\")\n\n # Initialize tokenizer\n tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n\n # Get token count for prompt (excluding question and chunk)\n prompt_token_length = len(tokenizer.encode(self.config[\"prompt_template\"]))\n\n # Max tokens for each chunk of text (in order to fit in LLM's context length)\n max_chunk_token_length = self.config[\"context_length\"] - prompt_token_length\n\n # Apply a margin factor since we excluded the question and\n # might not be using the same tokenization scheme as the model\n chunk_margin_factor = self.config[\"chunk_margin_factor\"]\n max_chunk_token_length = int(max_chunk_token_length * chunk_margin_factor)\n\n # Tokenize text and split into chunks\n full_text_tokens = tokenizer.encode(text)\n chunks = []\n start_idx = 0\n chunk_overlap_in_tokens = max(5, int(max_chunk_token_length * 0.1))\n\n while start_idx < len(full_text_tokens):\n end_idx = start_idx + max_chunk_token_length\n chunk = tokenizer.decode(full_text_tokens[start_idx:end_idx])\n chunks.append(chunk)\n\n if self.verbose:\n tqdm.write(f\"Chunk {len(chunks)} is {len(chunk)} characters\")\n\n start_idx = end_idx - chunk_overlap_in_tokens\n\n return chunks\n\n def get_answer(self, question: str, chunk: str) -> str:\n \"\"\"Get an answer from the LLM for a given question and chunk.\"\"\"\n prompt = self.config[\"prompt_template\"].format(question=question, chunk=chunk)\n answer = self.get_LLM_response(prompt)\n return answer\n\n 
def summarize_answers(self, question: str, answers: List[str]) -> str:\n \"\"\"Summarize all chunk-answers for a given question and return a summary.\"\"\"\n answers_str = \"\\n\\n\".join(\n [\n f\"Answer based on chunk {i}/{len(answers)}: {answer}\"\n for i, answer in enumerate(answers, 1)\n ]\n )\n prompt = self.config[\"summarize_template\"].format(\n question=question, answers=answers_str\n )\n summary = self.get_LLM_response(prompt)\n return summary\n\n def get_LLM_response(self, prompt: str) -> str:\n \"\"\"Get a response from the LLM for a given prompt.\n\n This method should be implemented in subclasses for specific LLMs.\n\n Args:\n prompt (str): The prompt to be sent to the LLM.\n\n Returns:\n str: The response generated by the LLM.\n \"\"\"\n raise NotImplementedError\n\n\nclass LocalLLM(BaseLLM):\n def __init__(self, config: Dict, verbose: bool):\n \"\"\"Initialize a local LLM\"\"\"\n super().__init__(config, verbose)\n\n self.model = GPT4All(config[\"model_name\"])\n\n def get_LLM_response(self, prompt: str) -> str:\n \"\"\"Get a response from the local LLM for a given prompt.\n\n Args:\n prompt (str): The prompt to be sent to the local LLM.\n\n Returns:\n str: The response generated by the local LLM.\n \"\"\"\n return self.model.generate(prompt, self.config[\"max_output_tokens\"])\n\n\nclass OpenAILLM(BaseLLM):\n def __init__(self, config: Dict, verbose: bool):\n super().__init__(config, verbose)\n\n # Check if API key is specified in config, otherwise let OpenAI default to\n # extracting it from environment variable\n if config.get(\"api_key\", None):\n self.client = OpenAI(api_key=config[\"api_key\"])\n\n if self.verbose:\n tqdm.write(\"Using OpenAI API key specified in config file\")\n else:\n self.client = OpenAI()\n\n if self.verbose:\n tqdm.write(\"Using OpenAI API key from environment variable\")\n\n def get_LLM_response(self, prompt):\n response = self.client.chat.completions.create(\n model=self.config[\"model_name\"],\n messages=[{\"role\": \"user\", \"content\": prompt}],\n temperature=self.config[\"temperature\"],\n )\n return response.choices[0].message.content\n\n\ndef llm_factory(model_key, config, verbose=True) -> BaseLLM:\n \"\"\"Factory function to instantiate LLM classes based on the model key.\"\"\"\n\n model_specific_config = config[\"models\"].get(model_key)\n if model_specific_config is None:\n raise ValueError(f\"Configuration for model key '{model_key}' not found.\")\n\n # Merge the general configuration with the model-specific one\n model_config = {**config, **model_specific_config}\n\n # Remove the nested models configuration to avoid duplication\n model_config.pop(\"models\", None)\n\n if model_key == \"local\":\n print(f\"Using a local model: {model_config['model_name']}\")\n return LocalLLM(config=model_config, verbose=verbose)\n elif model_key == \"openai\":\n print(f\"Using an OpenAI model: {model_config['model_name']}\")\n return OpenAILLM(config=model_config, verbose=verbose)\n else:\n raise ValueError(f\"Unknown model key: {model_key}\")\n", "repo_name": "rmattila/arXamination", "sub_path": "arxamination/llm_interaction.py", "file_name": "llm_interaction.py", "file_ext": "py", "file_size_in_byte": 6726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "transformers.logging.set_verbosity_error", "line_number": 9, "usage_type": "call"}, {"api_name": "transformers.logging", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 16, 
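The chunk_text method above slides a fixed-size token window and backs up by roughly 10% (with a floor of 5 tokens) so consecutive chunks overlap. A standalone sketch of that windowing, with a whitespace split standing in for the GPT-2 tokenizer:

# Overlapping-window chunking as in chunk_text(): step forward by the
# window size, then back up by the overlap so chunks share context.
# Note: max_len must exceed the overlap or the window never advances.
def chunk_tokens(text, max_len=8):
    tokens = text.split()
    overlap = max(5, int(max_len * 0.1))  # same floor of 5 tokens as above
    chunks, start = [], 0
    while start < len(tokens):
        end = start + max_len
        chunks.append(' '.join(tokens[start:end]))
        start = end - overlap
    return chunks

for chunk in chunk_tokens('one two three four five six seven eight nine ten'):
    print(chunk)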
"usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 30, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 35, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 35, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 57, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer", "line_number": 57, "usage_type": "name"}, {"api_name": "tqdm.tqdm.write", "line_number": 82, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 123, "usage_type": "name"}, {"api_name": "gpt4all.GPT4All", "line_number": 127, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 142, "usage_type": "name"}, {"api_name": "openai.OpenAI", "line_number": 148, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 151, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 151, "usage_type": "name"}, {"api_name": "openai.OpenAI", "line_number": 153, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 156, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "14546023265", "text": "from django.urls import path\nfrom . import views\n\n\nurlpatterns=[ \n path('', views.post_page, name='home'),\n path('category//', views.show_category, name='category'),\n path('post//',views.read_post, name='post'),\n path('add_post/',views.add_post, name='add_post'), \n \n \n path('about/', views.about, name='about'), \n path('contact/',views.contact, name='contact'), \n path('login/',views.login, name='login'), \n \n\n]\n\n", "repo_name": "dusnazarov/django_selfedu_course", "sub_path": "1_func_view_pk/coolsite/women/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 519, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "3439015892", "text": "#!/usr/bin/env python \n# -*- coding: utf-8 -*- \nfrom django.forms import ModelForm, widgets, fields\nfrom django import forms\nfrom myadmin import models\n# __all__ = (\n# 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',\n# 'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',\n# 'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',\n# 'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',\n# 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',\n# 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',\n# 'SplitHiddenDateTimeWidget', 'SelectDateWidget',\n# )\n\n\ndef create_modelforms(admin_class=None):\n print('create model form', admin_class)\n _widgets = {}\n\n def __new__(cls, *args, **kwargs):\n # print('cls', cls)\n # print('cls 
base_fields', cls.base_fields.items())\n for field_name, field_obj in cls.base_fields.items():\n # print(field_obj.widget)\n _widgets[field_name] = forms.EmailField\n if isinstance(field_obj.widget, widgets.Select):\n # add an attribute to the field so the front end can tell the widget type\n field_obj.type = 'checkbox'\n elif isinstance(field_obj.widget, widgets.CheckboxInput):\n # add an attribute to the field so the front end can tell the widget type\n field_obj.type = 'checkbox'\n else:\n field_obj.widget.attrs['class'] = 'form-control'\n return forms.ModelForm.__new__(cls)\n\n class Meta:\n model = admin_class.model\n fields = \"__all__\"\n # widgets = {\n # \"username\": forms.Textarea(attrs={'class': 'c1'}, )\n # }\n\n attr = {'Meta': Meta}\n obj = type('mf11', (forms.ModelForm,), attr)\n setattr(obj, '__new__', __new__)\n return obj\n\n\nclass UserInfoModelForm(forms.ModelForm):\n is_rember = forms.fields.CharField(widget=forms.widgets.CheckboxInput) # custom extra field\n\n class Meta:\n model = models.Menu\n fields = \"__all__\" # show all fields\n # fields = [\"username\", \"email\", \"user_type\", ] # only the listed fields are shown\n # exclude = [\"username\"] # fields to exclude\n labels = {\n \"username\": \"Username\",\n } # label display names; these take precedence over the model's verbose_name\n help_texts = {\n \"username\": \"Please enter a username\",\n } # help texts, rarely of much use\n widgets = {\n \"username\": forms.Textarea(attrs={'class': 'c1'}, )\n } # widget overrides\n error_messages = {\n \"__all__\": {}, # form-wide error messages\n \"username\": {\n 'required': \"Username must not be empty\",\n }\n } # custom error messages\n field_classes = {\n 'email': forms.fields.EmailField\n } # form field classes to use\n\n def clean_username(self): # validation hook\n return self.cleaned_data[\"username\"]\n\n\ndef modelform_factory(model, form=ModelForm, fields=None, exclude=None,\n formfield_callback=None, widgets=None, localized_fields=None,\n labels=None, help_texts=None, error_messages=None,\n field_classes=None):\n \"\"\"\n Return a ModelForm containing form fields for the given model.\n\n ``fields`` is an optional list of field names. If provided, only the named fields will be included in the returned fields. If omitted or '__all__', all fields will be used.\n\n ``exclude`` is an optional list of field names. If provided, the named fields will be excluded from the returned fields, even if they are listed in the ``fields`` argument.\n\n ``widgets`` is a dictionary of model field names mapped to a widget.\n\n ``localized_fields`` is a list of names of fields which should be localized.\n\n ``formfield_callback`` is a callable that takes a model field and returns a form field.\n\n ``labels`` is a dictionary of model field names mapped to a label.\n\n ``help_texts`` is a dictionary of model field names mapped to a help text.\n\n ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages.\n\n ``field_classes`` is a dictionary of model field names mapped to a form field class.\n \"\"\"\n # Create the inner Meta class.\n # FIXME: ideally, we should be able to construct a ModelForm without creating and passing in a temporary inner class. Build up a list of attributes that the Meta object will have.\n attrs = {'model': model}\n if fields is not None:\n attrs['fields'] = fields\n if exclude is not None:\n attrs['exclude'] = exclude\n if widgets is not None:\n attrs['widgets'] = widgets\n if localized_fields is not None:\n attrs['localized_fields'] = localized_fields\n if labels is not None:\n attrs['labels'] = labels\n if help_texts is not None:\n attrs['help_texts'] = help_texts\n if error_messages is not None:\n attrs['error_messages'] = error_messages\n if field_classes is not None:\n attrs['field_classes'] = field_classes\n\n # If parent form class already has an inner Meta, the Meta we're\n # creating needs to inherit from the parent's inner meta.\n parent = (object,)\n if hasattr(form, 'Meta'):\n parent = (form.Meta, object)\n Meta = type(str('Meta'), parent, attrs)\n if formfield_callback:\n Meta.formfield_callback = staticmethod(formfield_callback)\n # Give this new form class a reasonable name.\n class_name = model.__name__ + str('Form')\n\n # Class attributes for the new form class.\n form_class_attrs = {\n 'Meta': Meta,\n 'formfield_callback': formfield_callback\n }\n\n if (getattr(Meta, 'fields', None) is None and\n getattr(Meta, 'exclude', None) is None):\n # ImproperlyConfigured is not imported at the top of this module, so import it before raising\n from django.core.exceptions import ImproperlyConfigured\n raise 
ImproperlyConfigured(\n \"Calling modelform_factory without defining 'fields' or \"\n \"'exclude' explicitly is prohibited.\"\n )\n\n # Instantiate type(form) in order to use the same metaclass as form.\n return type(form)(class_name, (form,), form_class_attrs)\n", "repo_name": "topsai/StudentManagement", "sub_path": "myadmin/create_modelform.py", "file_name": "create_modelform.py", "file_ext": "py", "file_size_in_byte": 6019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.forms.EmailField", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django.forms.widgets.Select", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.forms.widgets", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.widgets.CheckboxInput", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.forms.widgets", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.ModelForm.__new__", "line_number": 35, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "django.forms.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 45, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 50, "usage_type": "name"}, {"api_name": "django.forms.fields.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.forms.fields", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 51, "usage_type": "name"}, {"api_name": "django.forms.widgets", "line_number": 51, "usage_type": "attribute"}, {"api_name": "myadmin.models.Menu", "line_number": 54, "usage_type": "attribute"}, {"api_name": "myadmin.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.forms.fields", "line_number": 55, "usage_type": "name"}, {"api_name": "django.forms.widgets", "line_number": 64, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 65, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 65, "usage_type": "name"}, {"api_name": "django.forms.fields", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 74, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 81, "usage_type": "name"}, {"api_name": "django.forms.fields", "line_number": 109, "usage_type": "name"}, {"api_name": "django.forms.fields", "line_number": 110, "usage_type": "name"}, {"api_name": "django.forms.widgets", "line_number": 113, "usage_type": "name"}, {"api_name": "django.forms.widgets", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "20459520136", "text": "from itertools import count\nimport collections\nimport functools\nimport sys\nimport weakref\nfrom sims4.callback_utils import add_callbacks, CallbackEvent\nfrom sims4.utils import decorator\nimport sims4.log\nimport sims4.reload\nlogger = sims4.log.Logger('Caches', default_owner='bhill')\nMAX_CACHE_SIZE = 18446744073709551616\nwith sims4.reload.protected(globals()):\n _KEYWORD_MARKER = object()\n use_asm_cache = True\n use_boundary_condition_cache = True\n 
use_constraints_cache = True\n skip_cache = False\n all_cached_functions = weakref.WeakSet()\n global_cache_version = 0\nCacheInfo = collections.namedtuple('CacheInfo',\n ('hits', 'misses', 'maxsize', 'currsize'))\n\n\ndef clear_all_caches(force=False):\n global global_cache_version\n global_cache_version += 1\n if force or global_cache_version % 1000 == 0:\n for fn in all_cached_functions:\n fn.cache.clear()\n\n\nif not sims4.reload.currently_reloading:\n add_callbacks(CallbackEvent.TUNING_CODE_RELOAD,\n lambda: clear_all_caches(force=True))\n\n\ndef _double_check_failure(cache_result, fn_result, fn, *args, **kwargs):\n exc = AssertionError('Stale Cache Hit')\n frame = sys._getframe(2)\n sims4.log.exception(\n 'Caches',\n 'cache result:{}, function result: {}, function:{} {} {}',\n cache_result,\n fn_result,\n fn,\n args,\n kwargs,\n exc=exc,\n frame=frame)\n\n\n@decorator\ndef cached(fn, maxsize=100, key=None, debug_cache=False):\n key_fn = key\n del key\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n if skip_cache:\n return fn(*args, **kwargs)\n cache = wrapper.cache\n if global_cache_version != wrapper.cache_version:\n cache.clear()\n wrapper.cache_version = global_cache_version\n try:\n if key_fn is None:\n key = (args, _KEYWORD_MARKER,\n frozenset(kwargs.items())) if kwargs else args\n else:\n key = key_fn(*args, **kwargs)\n result = cache[key]\n except TypeError as exc:\n if len(exc.args) == 1 and exc.args[0].startswith(\n 'unhashable type'):\n logger.callstack(\n 'Cache failed on {} in function argument(s):\\nargs={} kwargs={}\\nTry one of the following: use hashable types as arguments to the function (e.g. tuple instead of list) or implement __hash__() on the unhashable object.',\n exc.args[0],\n args,\n kwargs,\n level=sims4.log.LEVEL_ERROR,\n owner='bhill')\n raise exc\n except KeyError:\n cache[key] = result = fn(*args, **kwargs)\n if maxsize is not None and len(cache) > maxsize:\n cache.popitem(last=False)\n return result\n\n def cache_info():\n raise AttributeError(\n 'Cache statistics not tracked in optimized Python.')\n\n wrapper.cache = {} if maxsize is None else collections.OrderedDict()\n wrapper.cache_version = global_cache_version\n wrapper.uncached_function = fn\n wrapper.cache_info = cache_info\n all_cached_functions.add(wrapper)\n return wrapper\n\n\n@decorator\ndef cached_generator(fn, cache_decorator=cached, **cache_kwargs):\n @cache_decorator(**cache_kwargs)\n @functools.wraps(fn)\n def _wrapper(*args, **kwargs):\n return ([], fn(*args, **kwargs))\n\n @functools.wraps(_wrapper)\n def yielder(*args, **kwargs):\n (computed_values, gen) = _wrapper(*args, **kwargs)\n try:\n for i in count():\n if i >= len(computed_values):\n computed_values.append(next(gen))\n yield computed_values[i]\n except StopIteration:\n pass\n\n return yielder\n\n\ndef uncached(wrapper):\n return wrapper.uncached_function\n\n\nclass BarebonesCache(dict):\n __qualname__ = 'BarebonesCache'\n __slots__ = ('uncached_function', )\n\n def __init__(self, uncached_function):\n self.uncached_function = uncached_function\n\n def __repr__(self):\n return '{}({})'.format(type(self).__qualname__, self.uncached_function)\n\n __call__ = dict.__getitem__\n\n def __missing__(self, key):\n self[key] = ret = self.uncached_function(key)\n return ret\n", "repo_name": "johndpope/sims4-ai-engine", "sub_path": "core/caches.py", "file_name": "caches.py", "file_ext": "py", "file_size_in_byte": 4376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", 
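A toy use of the same memoization pattern as the cached decorator above: an OrderedDict keyed on the call arguments, with popitem(last=False) evicting the oldest entry once maxsize is exceeded. The decorator name and sizes here are illustrative, not part of the original module:

# Standalone sketch of the @cached pattern: memoize results into an
# OrderedDict and evict the oldest entry when the cache grows too large.
import collections
import functools

def tiny_cached(maxsize=2):
    def deco(fn):
        cache = collections.OrderedDict()
        @functools.wraps(fn)
        def wrapper(*args):
            if args in cache:
                return cache[args]
            cache[args] = result = fn(*args)
            if len(cache) > maxsize:
                cache.popitem(last=False)  # drop the oldest inserted key
            return result
        return wrapper
    return deco

@tiny_cached(maxsize=2)
def square(x):
    print('computing', x)
    return x * x

square(2); square(2); square(3); square(4)  # 2 is evicted once 4 is cached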
"api": [{"api_name": "sims4.callback_utils.log.Logger", "line_number": 10, "usage_type": "call"}, {"api_name": "sims4.callback_utils.log", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils", "line_number": 10, "usage_type": "name"}, {"api_name": "sims4.callback_utils.reload.protected", "line_number": 12, "usage_type": "call"}, {"api_name": "sims4.callback_utils.reload", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils", "line_number": 12, "usage_type": "name"}, {"api_name": "weakref.WeakSet", "line_number": 18, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 20, "usage_type": "call"}, {"api_name": "sims4.callback_utils.reload", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils", "line_number": 32, "usage_type": "name"}, {"api_name": "sims4.callback_utils.add_callbacks", "line_number": 33, "usage_type": "call"}, {"api_name": "sims4.callback_utils.CallbackEvent.TUNING_CODE_RELOAD", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils.CallbackEvent", "line_number": 33, "usage_type": "name"}, {"api_name": "sys._getframe", "line_number": 39, "usage_type": "call"}, {"api_name": "sims4.callback_utils.log.exception", "line_number": 40, "usage_type": "call"}, {"api_name": "sims4.callback_utils.log", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils", "line_number": 40, "usage_type": "name"}, {"api_name": "sims4.callback_utils.log", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sims4.callback_utils", "line_number": 80, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 57, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 93, "usage_type": "call"}, {"api_name": "sims4.utils.decorator", "line_number": 52, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 104, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 112, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 108, "usage_type": "call"}, {"api_name": "sims4.utils.decorator", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "10433387106", "text": "import torch\nimport torch.nn as nn\nfrom torchvision.io import read_image, ImageReadMode\nimport os.path as path\nfrom torch.utils.data import Dataset, DataLoader\nimport datetime\nfrom tqdm import tqdm\n\nimport config\n\n\nclass Sample:\n \"\"\"Stores sample for neural network.\"\"\"\n def __init__(self, path, label, length):\n self.path = path\n self.label = label\n self.length = length\n\nclass CustomDataLoader(Dataset):\n \"\"\"\n Provides access to the required data.\n Should be used only in Dataloader.\n \"\"\"\n def __init__(self, dir_path:str, labels_file:str) -> None:\n self.samples = list()\n self.image_mean = list()\n self.image_std = list()\n labels_file = path.join(dir_path, labels_file)\n\n with open(labels_file, 'r') as samples_file:\n for line in samples_file:\n line = line.split()\n\n label = ' '.join(line[1:])\n\n length = len(label)\n label_embedding = torch.zeros(\n config.MAX_LABEL_LENGTH, dtype=torch.int32)\n for i, symbol in enumerate(label):\n label_embedding[i] = config.TERMINALS_TO_INDEXES[symbol]\n\n self.samples.append(Sample(line[0], label_embedding, length))\n\n def __len__(self) -> int:\n return len(self.samples)\n\n def __getitem__(self, ind:int) -> tuple:\n sample = self.samples[ind]\n image = read_image(sample.path, ImageReadMode.GRAY).to(torch.float) \n return 
image, sample.label, sample.length\n\nclass SimpleHTR(nn.Module):\n \"\"\"Simple Handwritten Text Recognition System.\"\"\"\n def __init__(self) -> None:\n \"\"\"Set up input normalization and the CNN and RNN stages (takes no arguments).\"\"\"\n super(SimpleHTR, self).__init__()\n self.normalization = nn.LayerNorm(normalized_shape=[1,config.IMAGE_HEIGHT, config.IMAGE_WIDTH])\n self.__setCNN()\n self.__setRNN()\n\n def __setCNN(self):\n CHANNELS_NUMBER = [1,32,64,128,128,256]\n CONV_KERNEL_SIZES = [5,5,3,3,3]\n POOL_KERNEL_SIZES = POOL_STRIDES = [(2, 2), (2, 2), (2, 1), (2, 1), (2, 1)]\n\n layers = []\n for i in range(5):\n layers.append(nn.Conv2d(in_channels=CHANNELS_NUMBER[i],\n out_channels=CHANNELS_NUMBER[i+1],\n kernel_size=CONV_KERNEL_SIZES[i],\n padding='same'))\n if i & 1:\n layers.append(nn.BatchNorm2d(num_features=CHANNELS_NUMBER[i+1]))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=POOL_KERNEL_SIZES[i],\n stride=POOL_STRIDES[i], padding=0))\n self.layers = nn.ModuleList(layers)\n\n def __forwardCNN(self, x:torch.Tensor):\n for layer in self.layers:\n x = layer(x)\n return x\n\n def __setRNN(self):\n HiddenNum = 256\n self.rnn = nn.LSTM(input_size=HiddenNum, \n hidden_size=HiddenNum, \n num_layers=2, \n batch_first=True, \n bidirectional=True)\n # self.attention = nn.TransformerEncoderLayer(d_model=1024, nhead=4, batch_first=True, norm_first=True)\n self.filter = nn.Conv1d(in_channels=2*HiddenNum,\n out_channels=config.TERMINALS_NUMBER+1,\n kernel_size=3,\n padding='same')\n # self.logits = nn.Linear(in_features=1024*32, out_features=(config.TERMINALS_NUMBER+1)*32)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def __forwardRNN(self, x:torch.Tensor):\n x = x.squeeze(dim=2).transpose(1, 2)\n \n x, (_, _) = self.rnn(x)\n # x = self.attention(x)\n\n x = x.transpose(1, 2)\n # batch_size, timesteps, _ = x.shape\n # x = x.reshape(batch_size,-1)\n x = self.filter(x)\n # x = x.reshape(batch_size, -1, timesteps)\n return self.softmax(x)\n\n def forward(self, images:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Applies all layers to the passed batch.\n\n Keyword arguments:\n images: batch of images of size Bx1xHxW, where B - number of elements in batch,\n H - height of all images, W - width of all images in the batch\n \n Return value:\n Batch of per-class log-probabilities of size BxCxT\n \"\"\"\n images = self.normalization(images)\n images = self.__forwardCNN(images)\n images = self.__forwardRNN(images)\n return images\n\n\nclass ModelHandler:\n def __init__(self, \n model:nn.Module, \n optimizer:torch.optim.Optimizer,\n dir_path:str,\n params_file='BestParams.pth',\n cur_params_file='TrainParams.pth',\n device=torch.device('cpu')):\n self.model = model.to(device)\n self.optimizer = optimizer\n self.params_file = path.join(dir_path, params_file)\n self.cur_params_file = path.join(dir_path, cur_params_file)\n self.max_epoch = 0\n self.min_loss = float('inf')\n self.recovered_test = False\n self.recovered_train = False\n self.device = device\n self.history = []\n\n def recover(self, train=False):\n if not train:\n self.recovered_test = True\n self.recovered_train = False\n file = self.params_file\n else:\n self.recovered_train = True\n self.recovered_test = False\n file = self.cur_params_file\n if not path.isfile(file):\n return\n state = torch.load(file)\n\n self.model.load_state_dict(state['model'])\n self.max_epoch = state['epoch']\n self.min_loss = state['loss']\n self.history = state['history']\n 
self.optimizer.load_state_dict(state['optimizer'])\n\n def save(self, epoch, best=False):\n if best:\n file = self.params_file\n else:\n file = self.cur_params_file\n \n state = {\n 'epoch': epoch,\n 'loss':self.min_loss,\n 'history':self.history,\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n }\n torch.save(state, file)\n \n def get_parameters_number(self):\n if not (self.recovered_train or self.recovered_test):\n raise RuntimeError(\"Incorrect execution order\")\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad), \\\n sum(p.numel() for p in self.model.parameters())\n \n def get_training_epoch_number(self):\n if not (self.recovered_train or self.recovered_test):\n raise RuntimeError(\"Incorrect execution order\")\n return self.max_epoch\n \n def get_min_loss(self):\n if not (self.recovered_train or self.recovered_test):\n raise RuntimeError(\"Incorrect execution order\")\n return self.min_loss\n \n def get_stats(self):\n if not (self.recovered_train or self.recovered_test):\n raise RuntimeError(\"Incorrect execution order\")\n return self.history\n \n def train(self, \n train_dataloader: DataLoader, \n val_dataloader:DataLoader, \n epoch_n:int, \n loss_criteria):\n if not self.recovered_train:\n self.recover(train=True)\n bar_format = 'Training: {percentage:3.0f}%|{bar:25}| Epoch {n_fmt}/{total_fmt}, Remaining time {remaining}'\n for epoch in tqdm(range(self.max_epoch+1, epoch_n+1), bar_format=bar_format):\n mean_train_loss = 0.\n train_batches = 0\n epoch_train_start = datetime.datetime.now()\n self.model.train()\n for images, labels, lengths in train_dataloader:\n images, labels, lengths = images.to(\n self.device), labels.to(self.device), lengths.to(self.device)\n\n self.optimizer.zero_grad()\n images_transformed = self.model.forward(images)\n\n input_lengths = torch.full(\n size=[images_transformed.shape[0]],\n fill_value=config.MAX_LABEL_LENGTH,\n dtype=torch.int32, \n device=self.device)\n\n symbols_probabilities = torch.permute(\n images_transformed, (2, 0, 1))\n\n loss = loss_criteria(symbols_probabilities, \n labels,\n input_lengths, \n lengths)\n\n loss_val = loss.item()\n\n loss.backward()\n self.optimizer.step()\n\n mean_train_loss += loss_val\n train_batches += 1\n\n mean_train_loss /= train_batches\n print(f'Epoch: {epoch} [Training], \\\n {(datetime.datetime.now() - epoch_train_start).total_seconds():0.2f} s')\n print('Mean training loss:', mean_train_loss)\n\n epoch_val_start = datetime.datetime.now()\n mean_val_loss = self.evaluate(val_dataloader,\n loss_criteria,\n desc='',\n disable=True)\n print(f'Epoch: {epoch} [Validation], \\\n { (datetime.datetime.now() - epoch_val_start).total_seconds():0.2f} s')\n print('Mean validation loss:', mean_val_loss)\n\n self.history.append((mean_train_loss, mean_val_loss))\n if mean_val_loss < self.min_loss:\n self.min_loss = mean_val_loss\n self.save(epoch=epoch, best=True)\n self.save(epoch=epoch)\n self.max_epoch = epoch_n\n \n def evaluate(self, test_dataloader, loss_criteria, desc, disable=False):\n self.model.eval()\n mean_test_loss = 0.\n test_batches = 0\n bar_format = '{desc}: {percentage:3.0f}%|{bar:25}| Batch {n_fmt}/{total_fmt}'\n with torch.no_grad():\n for images, labels, lengths in tqdm(test_dataloader, \n bar_format=bar_format,\n desc=desc,\n disable=disable):\n images, labels, lengths = images.to(\n self.device), labels.to(self.device), lengths.to(self.device)\n images_transformed = self.model.forward(images)\n input_lengths = torch.full(\n 
size=[images_transformed.shape[0]],\n fill_value=config.MAX_LABEL_LENGTH,\n dtype=torch.int32,\n device=self.device)\n\n symbols_probabilities = torch.permute(\n images_transformed, (2, 0, 1))\n\n loss = loss_criteria(symbols_probabilities,\n labels,\n input_lengths,\n lengths)\n mean_test_loss += loss.item()\n test_batches += 1\n mean_test_loss /= test_batches\n return mean_test_loss\n\nclass Image2Word(nn.Module):\n \"\"\"Handwritten Text Recognition System.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set up input normalization and the CNN and RNN stages (takes no arguments).\"\"\"\n super(Image2Word, self).__init__()\n self.normalization = nn.LayerNorm(\n normalized_shape=[1, config.IMAGE_HEIGHT, config.IMAGE_WIDTH])\n self.__setCNN()\n self.__setRNN()\n\n def __setCNN(self):\n CHANNELS_NUMBER = [1, 64, 128, 128, 256, 256, 512, 512]\n CONV_KERNEL_SIZES = [5, 5, 3, 3, 3, 3, 3]\n POOL_KERNEL_SIZES = POOL_STRIDES = [(2, 2), (2, 1), (2, 2), (), (2, 2), (2, 1), (2, 1)] # the empty tuple is a placeholder: pooling is skipped when i % 4 == 3\n\n layers = []\n for i in range(len(CONV_KERNEL_SIZES)):\n layers.append(nn.Conv2d(in_channels=CHANNELS_NUMBER[i],\n out_channels=CHANNELS_NUMBER[i+1],\n kernel_size=CONV_KERNEL_SIZES[i],\n padding='same'))\n if i % 3 == 2:\n layers.append(nn.BatchNorm2d(\n num_features=CHANNELS_NUMBER[i+1]))\n layers.append(nn.ReLU())\n if i % 4 != 3:\n layers.append(nn.MaxPool2d(kernel_size=POOL_KERNEL_SIZES[i],\n stride=POOL_STRIDES[i], padding=0))\n self.layers = nn.ModuleList(layers)\n\n def __forwardCNN(self, x: torch.Tensor):\n for layer in self.layers:\n x = layer(x)\n return x\n\n def __setRNN(self):\n HiddenNum = 512\n self.rnn = nn.LSTM(input_size=HiddenNum,\n hidden_size=HiddenNum,\n num_layers=2,\n batch_first=True,\n bidirectional=True)\n # self.attention = nn.TransformerEncoderLayer(d_model=1024, nhead=4, batch_first=True, norm_first=True)\n self.filter = nn.Conv1d(in_channels=2*HiddenNum,\n out_channels=config.TERMINALS_NUMBER+1,\n kernel_size=3,\n padding='same')\n # self.logits = nn.Linear(in_features=1024*32, out_features=(config.TERMINALS_NUMBER+1)*32)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def __forwardRNN(self, x: torch.Tensor):\n x = x.squeeze(dim=2).transpose(1, 2)\n\n x, (_, _) = self.rnn(x)\n # x = self.attention(x)\n\n x = x.transpose(1, 2)\n # batch_size, timesteps, _ = x.shape\n # x = x.reshape(batch_size,-1)\n x = self.filter(x)\n # x = x.reshape(batch_size, -1, timesteps)\n return self.softmax(x)\n\n def forward(self, images: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Applies all layers to the passed batch.\n\n Keyword arguments:\n images: batch of images of size Bx1xHxW, where B - number of elements in batch,\n H - height of all images, W - width of all images in the batch\n \n Return value:\n Batch of per-class log-probabilities of size BxCxT\n \"\"\"\n images = self.normalization(images)\n images = self.__forwardCNN(images)\n images = self.__forwardRNN(images)\n return images\n", "repo_name": "sm0kebamb0o/SimpleHTR", "sub_path": "files/Model.py", "file_name": "Model.py", "file_ext": "py", "file_size_in_byte": 14744, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 37, 
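ModelHandler.train and evaluate above permute the network output from BxCxT to TxBxC before handing it to a CTC-style loss. A minimal shape check with torch.nn.CTCLoss on toy sizes (the real dimensions come from config, which is not shown here):

# Toy check of the (T, B, C) layout CTC expects, mirroring the
# torch.permute(images_transformed, (2, 0, 1)) calls above.
import torch
import torch.nn as nn

B, C, T, L = 4, 11, 32, 10                     # batch, classes (incl. blank), timesteps, label length
logits = torch.randn(B, C, T).log_softmax(1)   # network output: B x C x T, log-probs over classes
log_probs = logits.permute(2, 0, 1)            # CTC wants T x B x C
targets = torch.randint(1, C, (B, L))          # class 0 is the blank index
input_lengths = torch.full((B,), T, dtype=torch.long)
target_lengths = torch.full((B,), L, dtype=torch.long)
loss = nn.CTCLoss()(log_probs, targets, input_lengths, target_lengths)
print(loss.item())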
"usage_type": "call"}, {"api_name": "config.MAX_LABEL_LENGTH", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "config.TERMINALS_TO_INDEXES", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torchvision.io.read_image", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.io.ImageReadMode.GRAY", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torchvision.io.ImageReadMode", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "config.IMAGE_HEIGHT", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.IMAGE_WIDTH", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn.LSTM", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "config.TERMINALS_NUMBER", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 136, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 209, "usage_type": 
"name"}, {"api_name": "tqdm.tqdm", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 218, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 218, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 227, "usage_type": "call"}, {"api_name": "config.MAX_LABEL_LENGTH", "line_number": 229, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.permute", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 251, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 251, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 254, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 254, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 260, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 275, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 283, "usage_type": "call"}, {"api_name": "config.MAX_LABEL_LENGTH", "line_number": 285, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 286, "usage_type": "attribute"}, {"api_name": "torch.permute", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 301, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 301, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 311, "usage_type": "name"}, {"api_name": "config.IMAGE_HEIGHT", "line_number": 312, "usage_type": "attribute"}, {"api_name": "config.IMAGE_WIDTH", "line_number": 312, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 323, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 323, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 328, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 328, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 330, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 330, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 332, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 332, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 334, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 334, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 336, "usage_type": "attribute"}, {"api_name": "torch.nn.LSTM", "line_number": 343, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 343, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 349, "usage_type": "name"}, {"api_name": "config.TERMINALS_NUMBER", "line_number": 350, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 354, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 354, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 356, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 369, "usage_type": "attribute"}]} +{"seq_id": "25168844025", "text": "from typing import Optional\n\nimport pytest\nfrom sqlalchemy.engine.url import 
make_url\n\nfrom superset.exceptions import SupersetSecurityException\nfrom superset.security.analytics_db_safety import check_sqlalchemy_uri\nfrom tests.integration_tests.test_app import app\n\n\n@pytest.mark.parametrize(\n \"sqlalchemy_uri, error, error_message\",\n [\n (\"postgres://user:password@test.com\", False, None),\n (\n \"sqlite:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+pysqlite:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+aiosqlite:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+pysqlcipher:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+new+driver:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"sqlite+new+:///home/superset/bad.db\",\n True,\n \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\",\n ),\n (\n \"shillelagh:///home/superset/bad.db\",\n True,\n \"shillelagh cannot be used as a data source for security reasons.\",\n ),\n (\n \"shillelagh+apsw:///home/superset/bad.db\",\n True,\n \"shillelagh cannot be used as a data source for security reasons.\",\n ),\n (\"shillelagh+:///home/superset/bad.db\", False, None),\n (\n \"shillelagh+something:///home/superset/bad.db\",\n False,\n None,\n ),\n ],\n)\ndef test_check_sqlalchemy_uri(\n sqlalchemy_uri: str, error: bool, error_message: Optional[str]\n):\n with app.app_context():\n if error:\n with pytest.raises(SupersetSecurityException) as excinfo:\n check_sqlalchemy_uri(make_url(sqlalchemy_uri))\n assert str(excinfo.value) == error_message\n else:\n check_sqlalchemy_uri(make_url(sqlalchemy_uri))\n", "repo_name": "apache/superset", "sub_path": "tests/integration_tests/security/analytics_db_safety_tests.py", "file_name": "analytics_db_safety_tests.py", "file_ext": "py", "file_size_in_byte": 2634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55269, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Optional", "line_number": 69, "usage_type": "name"}, {"api_name": "tests.integration_tests.test_app.app.app_context", "line_number": 71, "usage_type": "call"}, {"api_name": "tests.integration_tests.test_app.app", "line_number": 71, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 73, "usage_type": "call"}, {"api_name": "superset.exceptions.SupersetSecurityException", "line_number": 73, "usage_type": "argument"}, {"api_name": "superset.security.analytics_db_safety.check_sqlalchemy_uri", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.url.make_url", "line_number": 74, "usage_type": "call"}, {"api_name": "superset.security.analytics_db_safety.check_sqlalchemy_uri", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.url.make_url", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "7405536554", "text": "# Script to find GOES solar X-ray flux data for the same 
time interval as the CXO observation, after accounting for light travel time.\n\n# Authors: D. M. Weigt, Seán McEntee, Brad Snios\n\n# Relevant packages\nimport pandas as pd\nimport numpy as np\nimport sunpy\nimport sunpy.timeseries as ts\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\nimport astropy.units as u\nfrom astropy.time import Time #convert between different time coordinates\nfrom astropy.time import TimeDelta #add/subtract time intervals \n\n# Reading in the config.ini file containing any hard wired inputs\nimport configparser\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nobsID = str(config['inputs']['obsID'])\n\n# Reading in excel catalogue file with all relevant info\ncatalogue = pd.read_excel('catalogue_all_data.xlsx')\nindex = np.where(catalogue['ObsID'] == int(obsID))[0][0]\n\nstart_date = catalogue['Start Date'][index]\nend_date = catalogue['End Date'][index]\nexp_evt = catalogue['Exposure Time (ks)'][index]\ntstart_evt = catalogue['Tstart'][index]\ntstop_evt = catalogue['Tstop'][index]\n\n# Brad's Horizons code to extract the ephemeris file\nfrom astroquery.jplhorizons import Horizons #automatically download ephemeris \n\n# The start and end times are taken from the Horizons file.\ntstart_eph=Time(tstart_evt, format='cxcsec')\ntstop_eph=Time(tstop_evt, format='cxcsec')\ndt = TimeDelta(0.125, format='jd')\n# Below sets the parameters of what observer the ephemeris file is generated from. For example, '500' = centre of the Earth, '500@-151' = CXO\nobj_jup_cxo = Horizons(id=599,location='500@-151',epochs={'start':tstart_eph.iso, 'stop':tstop_eph.iso, 'step':'1m'}, id_type='majorbody')\n\neph_jup_cxo = obj_jup_cxo.ephemerides()\n\nobj_sun_jup = Horizons(id=599,location='500@10',epochs={'start':tstart_eph.iso, 'stop':tstop_eph.iso, 'step':'1m'}, id_type='majorbody')\n\neph_sun_jup = obj_sun_jup.ephemerides()\n\n\"\"\"Setting up event times\"\"\"\njup_cxo_lt = np.mean(eph_jup_cxo['lighttime'])\nsun_jup_lt = np.mean(eph_sun_jup['lighttime'])\nsun_earth_lt = np.mean(eph_sun_jup['earth_lighttime'])\n# earth_jup_lt = np.mean(eph_earth_jup['lighttime'])\nlt = (TimeDelta((sun_jup_lt + jup_cxo_lt - sun_earth_lt) * u.min)).datetime\n\ncxo_tstart = Time(start_date, format='iso') - lt # taking away lt from Earth to Jupiter and then to CXO\ncxo_exp = TimeDelta(exp_evt * u.s).datetime # replace with exposure time of event in seconds\ncxo_tend = cxo_tstart + cxo_exp\n\n# defining start and end times 10 minutes either side of the CXO observation after accounting for lt \ntstart = (cxo_tstart - 600 * u.s).iso\ntend = (cxo_tend + 600 * u.s).iso\n\nif (int(obsID) == 1862) or (int(obsID) == 2519):\n sat_num = 10 # changes based on date of observation\nelse:\n sat_num = 15\nresult = Fido.search(a.Time(tstart, tend), a.Instrument(\"XRS\"))\nresult_goes_cxo = Fido.search(a.Time(tstart, tend), a.Instrument(\"XRS\"), a.goes.SatelliteNumber(sat_num))\n#Note: GOES satellite number will change depending on year! 
'result' will show you what satellites are available\ngoes_cwd = str(config['inputs']['goes_cwd']) # will need to change this in config.ini file\nfiles_cxo = Fido.fetch(result_goes_cxo, path=goes_cwd) # fetches the GOES data via sunpy from the NOAA database\ngoes_cxo = ts.TimeSeries(files_cxo, concatenate=True) # creates time series of selected GOES data\n\n# Converting GOES light curve to something similar to Jupiter light curve\ngoes_time_arr = Time(goes_cxo.index).cxcsec\ngoes_lc_cxo = goes_time_arr[np.where((goes_time_arr > cxo_tstart.cxcsec) & (goes_time_arr < cxo_tend.cxcsec))[0]]\ngoes_lc_cxo_flux = np.array(goes_cxo.quantity('xrsb')[np.where((goes_time_arr > cxo_tstart.cxcsec) & (goes_time_arr < cxo_tend.cxcsec))[0]])\ngoes_lc_cxo = [(x - cxo_tstart.cxcsec)/60 for x in goes_lc_cxo]\n\n# Writing GOES data to text file so we don't have to deal with sunpy each time.\nnp.savetxt(f'{obsID}/{obsID}_GOES_XRS_bhardwaj.txt', np.c_[goes_lc_cxo, goes_lc_cxo_flux], delimiter=',', header='time (min),flux (W/m^2)')\n\n", "repo_name": "SeanMcEntee/cxo_goes_disk_study", "sub_path": "find_GOES_data.py", "file_name": "find_GOES_data.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "configparser.ConfigParser", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 26, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 38, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 39, "usage_type": "call"}, {"api_name": "astropy.time.TimeDelta", "line_number": 40, "usage_type": "call"}, {"api_name": "astroquery.jplhorizons.Horizons", "line_number": 42, "usage_type": "call"}, {"api_name": "astroquery.jplhorizons.Horizons", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 53, "usage_type": "call"}, {"api_name": "astropy.time.TimeDelta", "line_number": 55, "usage_type": "call"}, {"api_name": "astropy.units.min", "line_number": 55, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 55, "usage_type": "name"}, {"api_name": "astropy.time.Time", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.time.TimeDelta", "line_number": 58, "usage_type": "call"}, {"api_name": "astropy.units.s", "line_number": 58, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 58, "usage_type": "name"}, {"api_name": "astropy.units.s", "line_number": 62, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 62, "usage_type": "name"}, {"api_name": "astropy.units.s", "line_number": 63, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 63, "usage_type": "name"}, {"api_name": "sunpy.net.Fido.search", "line_number": 69, "usage_type": "call"}, {"api_name": "sunpy.net.Fido", "line_number": 69, "usage_type": "name"}, {"api_name": "sunpy.net.attrs.Time", "line_number": 69, "usage_type": "call"}, {"api_name": "sunpy.net.attrs", "line_number": 69, "usage_type": "name"}, {"api_name": "sunpy.net.attrs.Instrument", "line_number": 69, "usage_type": "call"}, {"api_name": "sunpy.net.Fido.search", "line_number": 70, "usage_type": "call"}, {"api_name": "sunpy.net.Fido", "line_number": 70, "usage_type": "name"}, {"api_name": 
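The light-travel-time shift applied in the script above is (Sun to Jupiter) + (Jupiter to Chandra) - (Sun to Earth). A toy version of the same TimeDelta arithmetic, with invented per-leg light times standing in for the Horizons 'lighttime' columns:

# Illustrative only: the three light times below are made-up stand-ins
# for the mean Horizons light-time values used in the script above.
import astropy.units as u
from astropy.time import Time, TimeDelta

sun_jup_lt = 43.3    # Sun -> Jupiter, minutes (assumed)
jup_cxo_lt = 35.1    # Jupiter -> Chandra, minutes (assumed)
sun_earth_lt = 8.3   # Sun -> Earth, minutes (assumed)

lt = TimeDelta((sun_jup_lt + jup_cxo_lt - sun_earth_lt) * u.min)
tstart = Time('2021-01-01 00:00:00', scale='utc') - lt
print(tstart.iso)    # observation start shifted back by ~70 minutes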
"sunpy.net.attrs.Time", "line_number": 70, "usage_type": "call"}, {"api_name": "sunpy.net.attrs", "line_number": 70, "usage_type": "name"}, {"api_name": "sunpy.net.attrs.Instrument", "line_number": 70, "usage_type": "call"}, {"api_name": "sunpy.net.attrs.goes.SatelliteNumber", "line_number": 70, "usage_type": "call"}, {"api_name": "sunpy.net.attrs.goes", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sunpy.net.Fido.fetch", "line_number": 73, "usage_type": "call"}, {"api_name": "sunpy.net.Fido", "line_number": 73, "usage_type": "name"}, {"api_name": "sunpy.timeseries.TimeSeries", "line_number": 74, "usage_type": "call"}, {"api_name": "sunpy.timeseries", "line_number": 74, "usage_type": "name"}, {"api_name": "astropy.time.Time", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "72987184195", "text": "from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.short_create, name=\"add_short_url\"),\n path(\"/\", views.redirect_to_original, name=\"get_short_url\"),\n path('api/docs/', views.api_documentation, name=\"api_docs\"),\n]\n\n\nurlpatterns += [\n path(\"api-v1/\", include('shorts.api.urls')),\n]", "repo_name": "thomas545/Shorting", "sub_path": "shorts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "750511589", "text": "import os\nimport sys\nfrom PyQt5 import QtCore, QtMultimedia\nimport time\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n\ndef sound():\n\n filename = os.path.join(CURRENT_DIR, \"C:\\\\Users\\\\Zeynab\\\\Downloads\\\\3times.mp3\")\n app2 = QtCore.QCoreApplication(sys.argv)\n player = QtMultimedia.QMediaPlayer()\n url = QtCore.QUrl.fromLocalFile(filename)\n player.setMedia(QtMultimedia.QMediaContent(url))\n player.play()\n\n sys.exit(app2.exec_())\n\nif __name__ == \"__main__\":\n sound()", "repo_name": "zeynabT/cansat", "sub_path": "test/sound.py", "file_name": "sound.py", "file_ext": "py", "file_size_in_byte": 500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": 
"PyQt5.QtMultimedia.QMediaPlayer", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtMultimedia", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QUrl.fromLocalFile", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QUrl", "line_number": 12, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtMultimedia.QMediaContent", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtMultimedia", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "25097577783", "text": "from typing import List, Tuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nimport tokenizer\n\n\nclass Beam(object):\n def __init__(self, beam_size, device):\n self.beam_size = beam_size\n # The backpointers at each time-step.\n self.prevKs = []\n # The score for each translation on the beam.\n self.scores: Tensor\n # The outputs at each time-step. [1, beam_size]\n self.nextYs = [torch.zeros(beam_size, dtype=torch.long, device=device)]\n self.nextYs[0][0] = tokenizer.bos_token_id\n # Has EOS topped the beam yet.\n self.eosTop = False\n # Time and k pair for finished. List[Tuple[]]\n self.finished: List[Tuple[Tensor, int, int]] = []\n\n def getCurrentState(self, device):\n \"\"\"\n Get the outputs for the current timestep.\n Return: [beam_size x 1]\n \"\"\"\n return self.nextYs[-1].unsqueeze(1).to(device)\n\n def getCurrentOrigin(self) -> Tensor:\n \"Get the backpointers for the current timestep.\"\n return self.prevKs[-1]\n\n def advance(self, lsm_logits: Tensor):\n \"\"\"\n Given prob over words for every last beam `wordLk`\n\n Parameters:\n\n * `lsm_logits`- probs of advancing from the last step [beam_size x vocab_size]\n \"\"\"\n vocab_size = lsm_logits.size(1)\n\n # Sum the previous scores.\n if self.prevKs:\n beamLk = lsm_logits + self.scores.unsqueeze(1).expand_as(lsm_logits) # [beam_size, vocab_size]\n\n # Don't let EOS have children.\n nextY = self.nextYs[-1]\n for i in range(nextY.size(0)):\n if nextY[i] == tokenizer.eos_token_id:\n beamLk[i] = -1e20\n else:\n beamLk = lsm_logits[0] # [vocab_size]\n\n flatBeamLk = beamLk.view(-1) # [n * vocab_size]\n bestScores, bestScoresId = flatBeamLk.topk(self.beam_size, 0)\n self.scores = bestScores\n\n # bestScoresId is flattened beam x word array, so calculate which\n # word and beam each score came from\n prevK = bestScoresId.div(vocab_size, rounding_mode=\"trunc\")\n nextY = bestScoresId - prevK * vocab_size\n self.prevKs.append(prevK)\n self.nextYs.append(nextY)\n\n for i in range(nextY.size(0)):\n if nextY[i] == tokenizer.eos_token_id:\n self.finished.append((self.scores[i], len(self.nextYs) - 1, i))\n\n # End condition is when top-of-beam is EOS and no global score.\n if nextY[0] == tokenizer.eos_token_id:\n self.eosTop = True\n\n def done(self):\n return self.eosTop and len(self.finished) >= self.beam_size\n\n def getFinal(self) -> List[Tuple[Tensor, int, int]]:\n if not self.finished:\n self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))\n else:\n self.finished.sort(key=lambda a: -a[0])\n\n if len(self.finished) < self.beam_size:\n nextY = self.nextYs[-1]\n unfinished = [(self.scores[i], len(self.nextYs) - 1, i)\n for i in range(nextY.size(0))\n if nextY[i] != tokenizer.eos_token_id]\n unfinished.sort(key=lambda a: -a[0])\n self.finished += unfinished[: self.beam_size - len(self.finished)]\n\n return 
self.finished[: self.beam_size]\n\n def getHyp(self, beam_res: List[Tuple[Tensor, int, int]]):\n \"\"\"\n Walk back to construct the full hypothesis.\n \"\"\"\n hyps = []\n for _, timestep, k in beam_res:\n hyp = []\n for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):\n hyp.append(self.nextYs[j + 1][k])\n k = self.prevKs[j][k]\n hyps.append(hyp[::-1])\n return hyps\n\n def buildTargetTokens(self, preds):\n def f(pred):\n tokens = []\n for tok in pred:\n if tok == tokenizer.eos_token_id:\n break\n tokens.append(tok)\n return tokens\n\n return [f(pred) for pred in preds]\n", "repo_name": "frezcirno/CodeGAN", "sub_path": "beam.py", "file_name": "beam.py", "file_ext": "py", "file_size_in_byte": 4101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.Tensor", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tokenizer.bos_token_id", "line_number": 18, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 35, "usage_type": "name"}, {"api_name": "tokenizer.eos_token_id", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tokenizer.eos_token_id", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tokenizer.eos_token_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tokenizer.eos_token_id", "line_number": 89, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 95, "usage_type": "name"}, {"api_name": "tokenizer.eos_token_id", "line_number": 112, "usage_type": "attribute"}]} +{"seq_id": "13703103694", "text": "from typing import Union\nfrom typing import Tuple\n\nfrom ..utils.propagator import propagator\nfrom ..utils.propagator import propagator_fft\nfrom ..utils.map import filter_weight\nimport qutip as q\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef generate_filter(H: Union[list, q.qobj.Qobj],\n t_list: np.ndarray,\n noise_op: q.qobj.Qobj,\n trunc_freq: Tuple = None,\n options=q.Options(atol=1e-10, rtol=1e-10),\n solver: str = 'qutip',\n u0_list: np.ndarray = None,\n prop_array: np.ndarray = None,\n prop_array_fft: np.ndarray = None):\n\n if prop_array_fft is None:\n if prop_array is None:\n prop_array = propagator(H, t_list, options, solver=solver, u0_list=u0_list)\n fk_list, prop_array_fft = propagator_fft(prop_array, t_list, trunc_freq=None)\n\n fk_list, filter_strength, _ = filter_weight(prop_array, t_list, noise_op, trunc_freq, prop_array_fft=prop_array_fft)\n return fk_list, filter_strength\n\n\ndef plot_filter(H, t_list, noise_op, trunc_freq=None, options=q.Options(atol=1e-10, rtol=1e-10), solver='qutip', u0_list=None,\n filters=None, ax=None, prop_array: np.ndarray = None, prop_array_fft: np.ndarray = None):\n\n if filters is None:\n 
filters = [None, None]\n\n if any(x is None for x in filters):\n fk_list, filter_strength = generate_filter(H, t_list, noise_op, trunc_freq=trunc_freq, options=options,\n solver=solver, u0_list=u0_list, prop_array=prop_array,\n prop_array_fft=prop_array_fft)\n else:\n fk_list, filter_strength = filters\n\n if ax is None:\n ax = plt.subplot()\n ax.step(fk_list, filter_strength, where='mid')\n ax.fill_between(fk_list, filter_strength, step=\"mid\", alpha=0.4)\n ax.set_xlabel(r'frequency (unit of time$^{-1}$)')\n ax.set_ylabel('filter strength')\n ax.set_xlim(trunc_freq[0], trunc_freq[1])\n ax.set_ylim(0.0,)\n\n return ax\n\n\ndef plot_filter_Sf(H: Union[list, q.qobj.Qobj],\n t_list: np.ndarray,\n noise_op: q.qobj.Qobj,\n f_list: np.ndarray,\n Sf_list: np.ndarray,\n trunc_freq: Tuple = None,\n options=q.Options(atol=1e-10, rtol=1e-10),\n solver: str = 'qutip',\n u0_list: np.ndarray = None,\n filters: list = None,\n ax=None, prop_array: np.ndarray = None,\n prop_array_fft: np.ndarray = None):\n\n if filters is None:\n filters = [None, None]\n\n if prop_array_fft is None:\n if prop_array is None:\n prop_array = propagator(H, t_list, options, solver=solver, u0_list=u0_list)\n fk_list, prop_array_fft = propagator_fft(prop_array, t_list, trunc_freq=None)\n\n if trunc_freq is None:\n trunc_freq = (np.amin(f_list), np.amax(f_list))\n\n if ax is None:\n ax = plt.subplot()\n\n plot_filter(H, t_list, noise_op, trunc_freq=trunc_freq, options=options, solver=solver, u0_list=u0_list,\n filters=filters, ax=ax, prop_array=prop_array, prop_array_fft=prop_array_fft)\n ax2 = ax.twinx()\n ax2.plot(f_list, Sf_list, lw=2, alpha=1, color='k')\n ax2.set_xlim(trunc_freq[0], trunc_freq[1])\n ax2.set_ylim(0.0,)\n ax2.set_ylabel('noise amplitude')\n\n return ax\n\n\ndef plot_filter_Sf_multiple(H: Union[list, q.qobj.Qobj],\n t_list: np.ndarray,\n noise_ops: list,\n f_list_list: list,\n Sf_list_list: list,\n trunc_freq_list: list = None,\n options=q.Options(atol=1e-10, rtol=1e-10),\n solver: str = 'qutip',\n u0_list: np.ndarray = None,\n filters_list: list = None,\n ax=None,\n prop_array: np.ndarray = None,\n prop_array_fft: np.ndarray = None):\n\n if filters_list is None:\n filters_list = [[None] * len(noise_ops), [None] * len(noise_ops)]\n if prop_array_fft is None:\n if prop_array is None:\n prop_array = propagator(H, t_list, options, solver=solver, u0_list=u0_list)\n fk_list, prop_array_fft = propagator_fft(prop_array, t_list, trunc_freq=None)\n\n if len(noise_ops) == 1:\n\n return plot_filter_Sf(H, t_list, noise_ops[0], f_list_list[0], Sf_list_list[0], trunc_freq=trunc_freq_list[0],\n options=options, solver=solver, u0_list=u0_list, filters=[filters_list[0][0], filters_list[1][0]], ax=None,\n prop_array=prop_array, prop_array_fft=prop_array_fft)\n\n if ax is None:\n fig, ax = plt.subplots(len(noise_ops), 1)\n\n for n_ in range(len(noise_ops)):\n\n if trunc_freq_list is None:\n trunc_freq = None\n else:\n trunc_freq = trunc_freq_list[n_]\n\n plot_filter_Sf(H, t_list, noise_ops[n_], f_list_list[n_], Sf_list_list[n_], trunc_freq=trunc_freq,\n options=options, solver=solver, u0_list=u0_list, filters=[filters_list[0][n_], filters_list[1][n_]],\n ax=ax[n_], prop_array=prop_array, prop_array_fft=prop_array_fft)\n plt.tight_layout()\n return ax\n", "repo_name": "hzw770/kdshmap", "sub_path": "kdshmap/core/filter_func.py", "file_name": "filter_func.py", "file_ext": "py", "file_size_in_byte": 5526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": 
[{"api_name": "typing.Union", "line_number": 12, "usage_type": "name"}, {"api_name": "qutip.qobj", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 13, "usage_type": "attribute"}, {"api_name": "qutip.qobj", "line_number": 14, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 20, "usage_type": "attribute"}, {"api_name": "qutip.Options", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.propagator.propagator", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.propagator.propagator_fft", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.map.filter_weight", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qutip.Options", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 56, "usage_type": "name"}, {"api_name": "qutip.qobj", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 57, "usage_type": "attribute"}, {"api_name": "qutip.qobj", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 60, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 67, "usage_type": "attribute"}, {"api_name": "qutip.Options", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.propagator.propagator", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.propagator.propagator_fft", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 94, "usage_type": "name"}, {"api_name": "qutip.qobj", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 106, "usage_type": "attribute"}, {"api_name": "qutip.Options", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.propagator.propagator", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.propagator.propagator_fft", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, 
"usage_type": "name"}]} +{"seq_id": "19535249718", "text": "#!/usr/bin/env python3\nfrom pwn import *\nimport string\nimport random\nimport tqdm\n\nr = remote(\"127.0.0.1\", 50000)\ndef decrypt(data):\n r.recvline()\n r.sendline(bytes(data.hex(), \"ascii\"))\n decr = bytes.fromhex(str(r.recvline(), \"ascii\")[len(\"Your decrypted data is \"):])\n similarity = float(str(r.recvline(), \"ascii\")[len(\"Your data is \"):].split(\"%\")[0])\n return (decr, similarity)\n\ndata = \"Here's your flag: \"\nprint(len(data))\n\ncur_guess = b\"\"\nsimilarity = 0\ntestdata = b\"\\x00\" * 16\n\nsimilarity = 0\n\nfor blk in range(16):\n for i in range(16):\n best_guess = 0\n for j in tqdm.trange(0x100):\n test = b\"\\x00\" * (len(cur_guess)) + bytes([j])\n test += b\"\\x00\" * ((blk+1)*16 - len(test))\n test += testdata\n #print(test)\n decr, test_sim = decrypt(test)\n _, baseline_sim = decrypt(test[:(blk+1) * 16])\n new_sim = ((blk+2)*test_sim - (blk+1)*baseline_sim)\n if (new_sim >= similarity):\n similarity = new_sim\n #print(f\"New best: ({new_sim}) {decr}\")\n best_guess = decr[16 + len(cur_guess)]\n cur_guess += bytes([best_guess])\n similarity = 0\n try:\n print((cur_guess).decode(\"utf-8\"))\n if (cur_guess[2:].startswith(b\"SSM{\") and cur_guess.endswith(b\"}\")):\n print(f\"Flag: {(cur_guess).decode('utf-8')[2:]}\", flush=True)\n exit(0)\n except:\n pass\n", "repo_name": "Kodsport/sakerhetssm-2023-solutions", "sub_path": "kval/crypto/CyBerCrime/solve-oskar/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 1483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tqdm.trange", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "10358841886", "text": "import requests\n\n# 直接采用接口登记案件\ndef recSave():\n s = requests.session()\n s.__module__.encode(encoding='utf-8')\n url = 'http://123.56.170.103:8080/eUrbanMIS0901/login/validpassword'\n data = {'userName':'lk','password':'', 'browserVersion':'chrome/74.0.3729.157','osVersion':'Win10/32','content-type': 'charset=utf-8'}\n headers = {'Accept':'application/json, text/javascript, */*; q=0.01',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',\n 'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Content-Type':'application/x-www-form-urlencoded; charset=utf-8',\n 'Accept-Encoding':'gzip, deflate'\n }\n res = s.post(url, data=data ,headers =headers)\n print(res.content.decode('utf-8'))\n cookies = res.cookies\n # print(res.apparent_encoding)\n\n recSaveUrl = 'http://123.56.170.103:8080/eUrbanMIS0901/home/mis/rec/saverec'\n data3 = {\"eventSrcID\":\"11\",\"eventGradeID\":\"1\",\"recTypeID\":\"1\",\"eventTypeID\":\"1\",\"mainTypeID\":\"348\",\"subTypeID\":\"374\",\n \"eventDesc\":\"挂账案件测试\",\"newInstCondID\":\"1\",\"timeAreaID\":\"2\",\n \"address\":\"世界城加州阳光快递服务站(湖北省武汉市洪山区加州阳光3栋2单元002)(东南44.04米)\",\n \"districtID\":\"1\",\"streetID\":\"102\",\"communityID\":\"10005\",\"cellID\":\"1000010\",\"patrolID\":\"100508\",\n \"partCode\":\"\",\"customDeadline\":\"\",\"patrolDealFlag\":\"0\",\"shopName\":\"\",\"otherTaskNum\":\"\",\n \"returnVisitFlag\":\"0\",\"telReply\":\"\",\"callTypeID\":\"1\",\"telCall\":\"\",\n \"caseEmotion\":\"70\",\"eventDate\":\"\",\"isTransit\":\"1\",\"isOntimeAnswer\":\"1\",\n \"reporterName\":\"匿名\",\"genderID\":\"1\",\"homeAddress\":\"\",\"birthday\":\"\",\n \"idCardType\":\"10\",\"ageRangeID\":\"70\",\"recPushFlag\":\"0\",\"roadDirection\":\"\",\n 
\"isLawFlag\":\"0\",\"litigantName\":\"\",\"propertyCompanyInfo\":\"\",\"houseCode\":\"\",\n \"unitContactHuman\":\"\",\"dispatchedStreetFlag\":\"0\",\"coordinateX\":\"12736381.6407065\",\n \"coordinateY\":\"3547519.047862\",\"eventSrcName\":\"社会公众举报\",\"eventGradeName\":\"日常\",\n \"recTypeName\":\"城市管理类\",\"eventTypeName\":\"事件\",\"mainTypeName\":\"市容环境\",\n \"subTypeName\":\"病虫害\",\"newInstCondName\":\"绿植(园林树木)病虫害\",\"timeAreaName\":\"核心区\",\n \"districtName\":\"洪山区\",\"streetName\":\"洪山街道1\",\"communityName\":\"洪山社区2\",\n \"cellName\":\"420111001002001\",\"patrolName\":\"wq\",\"noDealContent\":\"\",\"cmDataTableID\":\"-1\",\n \"maxEventTypeID\":\"374\",\"maxEventTypeName\":\"病虫害\",\"funcForbidReporterInfoFlag\":\"1\",\n \"contact\":\"\",\"accepterID\":\"100500\",\"accepterName\":\"lk\",\"squadronID\":\"\",\"squadronName\":\"\",\"isTransitFlag\":\"false\"}\n\n info = s.post(recSaveUrl, data=data3, cookies = cookies, headers = headers)\n print(info.content.decode('utf-8'))\n\n s.close()\n\ndef recTransit(s, url,cookies,headers):\n # 参数意思?\n data = {\"actID\":\"4836\",\"taskListID\":\"2\",\"transInfo\":\"290,887,0,0\",\"opinion\":\"请及时处理!\",\"addNum\":\"0\"}\n res = s.post(url, data = data, headers = headers,cookies = cookies)\n\n\n\nif __name__ == '__main__':\n recSave()", "repo_name": "ZombieLd/workplace", "sub_path": "其它/登记案件.py", "file_name": "登记案件.py", "file_ext": "py", "file_size_in_byte": 3255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.session", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "34056098298", "text": "import sys\nsys.path.append('E:\\Data')\nimport subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\nSite='Negrocreek'\nYear='2010'\n\n\n\ndf = pd.read_csv(r'C:\\Users\\Farnoush\\Desktop\\Data\\LaCie\\Maxes\\Phies\\Negrocreek\\MaxP-2010-Negrocreek.csv')\ncolumn_labels =list( df.columns.get_values())\n\nPhiarray=[]\n\nfor column in df:\n #print(np.array(df[column]))\n Phiarray.append(np.array(df[column]))\n\nPhiarray=np.array(Phiarray)\nPHIARRAY=np.reshape(Phiarray,(-1,26))\n#PHIARRAY=np.delete(PHIARRAY,0,0)\n\n#print(PHIARRAY)\n\n#print(len(PHIARRAY))\n\n\n'''''''''\nPHIARRAY2=[]\n\nfor j in PHIARRAY:\n for element in PHIARRAY[j] :\n #if Phiarray[j][k]> 180:\n print(PHIARRAY[j])\n\n\n'''''''''\n\nresult = np.where(PHIARRAY > 180)\n\nincol=result[0]\n\ninrow=result[1]\n\n#print('result:',result,result[0],result[1],incol,inrow)\n\n[r,c]=PHIARRAY.shape\n\nPHIARRAY2=PHIARRAY.copy()\n\n\n\nfor i in np.arange(r):\n for j in np.arange(c):\n if PHIARRAY2[i][j] >=180:\n PHIARRAY2[i][j]=PHIARRAY[i][j]-360\n #print(PHIARRAY2[i][j])\n\n\n\n\n#print(Phiarray.dtype,len(PHIARRAY),column_labels )\n\n\ny=[0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13]\n\ni=1\n\nwhile i = 0 and col >= 0 and row < self.rows and col < self.cols:\n yield (row, col, self.grid[row][col])\n row += dr\n col += dc\n\nclass Tree:\n def __init__(self, height, visible=False):\n self.visible = visible\n self.height = height\n self.score = 0\n\n def __repr__(self):\n return \"Tree({}, {})\".format(self.height, self.visible)\n\ndef find_visible(grid, start_row, start_col, dr, dc):\n max_height = -1\n for (row, col, tree) in grid.walk(start_row, start_col, dr, dc):\n if tree.height > max_height:\n max_height = tree.height\n yield tree\n\ndef view(grid, start_row, start_col, dr, dc):\n init_height = 
grid[start_row][start_col].height\n for (r, c, tree) in grid.walk(start_row + dr, start_col + dc, dr, dc):\n yield tree\n if tree.height >= init_height:\n break\n\ndef view_distance(grid, start_row, start_col, dr, dc):\n return sum(1 for _ in view(grid, start_row, start_col, dr, dc))\n\ndef scenic_score(grid, row, col):\n result = 1\n for dr in [1,-1]:\n result *= view_distance(grid, row, col, dr, 0)\n for dc in [1,-1]:\n result *= view_distance(grid, row, col, 0, dc)\n return result\n\nif __name__ == \"__main__\":\n with open(\"inputs/day08\") as f:\n lines = [line.strip() for line in f]\n grid = Grid(len(lines), len(lines[0]))\n row = 0\n for line in lines:\n for col in range(len(line.strip())):\n grid[row][col] = Tree(int(line[col]))\n row += 1\n\n for row in range(0, grid.rows):\n for tree in find_visible(grid, row, 0, 0, 1):\n tree.visible = True\n for tree in find_visible(grid, row, grid.cols - 1, 0, -1):\n tree.visible = True\n for col in range(0, grid.cols - 1):\n for tree in find_visible(grid, 0, col, 1, 0):\n tree.visible = True\n for tree in find_visible(grid, grid.rows - 1, col, -1, 0):\n tree.visible = True\n\n print(sum(1 for tree in grid if tree.visible))\n\n for row in range(1, grid.rows - 1):\n for col in range(1, grid.cols - 1):\n grid[row][col].score = scenic_score(grid, row, col)\n print(max(tree.score for tree in grid))\n\n\n", "repo_name": "will-snavely/adventofcode", "sub_path": "aoc2022/day08.py", "file_name": "day08.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 22, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "8811927869", "text": "import warnings\n\nfrom qiskit.visualization.counts_visualization import plot_histogram\n\n\ndef iplot_histogram(data, figsize=None, number_to_keep=None,\n sort='asc', legend=None):\n \"\"\" Create a histogram representation.\n Graphical representation of the input array using a vertical bars\n style graph.\n Args:\n data (list or dict): This is either a list of dicts or a single\n dict containing the values to represent (ex. {'001' : 130})\n figsize (tuple): Figure size in pixels.\n number_to_keep (int): The number of terms to plot and\n rest is made into a single bar called other values\n sort (string): Could be 'asc' or 'desc'\n legend (list): A list of strings to use for labels of the data.\n The number of entries must match the length of data.\n Raises:\n VisualizationError: When legend is provided and the length doesn't\n match the input data.\n Returns:\n Figure: A matplotlib figure for the visualization\n Example:\n .. code-block::\n\n from qiskit import QuantumCircuit, BasicAer, execute\n from qiskit.visualization import iplot_histogram\n %matplotlib inline\n qc = QuantumCircuit(2, 2)\n qc.h(0)\n qc.cx(0, 1)\n qc.measure([0, 1], [0, 1])\n backend = BasicAer.get_backend('qasm_simulator')\n job = execute(qc, backend)\n iplot_histogram(job.result().get_counts())\n \"\"\"\n warnings.warn(\n \"The iplot_histogram function is deprecated and will be \"\n \"removed in a future release. The hosted code this depended on no \"\n \"longer exists so this is falling back to use the matplotlib \"\n \"visualizations. 
qiskit.visualization.plot_histogram should be \"\n \"used directly moving forward.\", DeprecationWarning, stacklevel=2)\n fig = plot_histogram(data, figsize=figsize, number_to_keep=number_to_keep,\n sort=sort, legend=legend)\n return fig\n", "repo_name": "OscarJHernandez/qc_portfolio_optimization", "sub_path": "venv/lib/python3.8/site-packages/qiskit/visualization/interactive/iplot_histogram.py", "file_name": "iplot_histogram.py", "file_ext": "py", "file_size_in_byte": 2146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "61", "api": [{"api_name": "warnings.warn", "line_number": 39, "usage_type": "call"}, {"api_name": "qiskit.visualization.counts_visualization.plot_histogram", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "17236251846", "text": "import logging\n\nfrom redis import Redis\nfrom redis.sentinel import Sentinel\n\n\nlog = logging.getLogger(__name__)\n\n\ndef init_redis(config):\n log.info('init redis')\n if 'sentinel' in config:\n sentinel_conf = config['sentinel']\n socket_timeout = sentinel_conf['socket_timeout']\n sentinel = Sentinel(sentinel_conf['sentinels'],\n socket_timeout=socket_timeout)\n redis = sentinel.master_for(sentinel_conf['service_name'],\n redis_class=Redis,\n socket_timeout=socket_timeout,\n db=config['db'])\n else:\n redis = Redis(**config)\n log.debug('Redis configured, %r' % config)\n yield redis\n", "repo_name": "Josephlouislg/SteamRoulette", "sub_path": "SteamRoulette/service/redis.py", "file_name": "redis.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "redis.sentinel.Sentinel", "line_number": 15, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 18, "usage_type": "name"}, {"api_name": "redis.Redis", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "27494402759", "text": "from typing import Optional\n\nfrom grpc import StatusCode\nfrom rest_framework import status\n\n# Mappings taken from https://github.com/grpc-ecosystem/grpc-gateway/blob/cb1fb905323b977e0ebb77a890696d7e30c9bc96/runtime/errors.go#L34-L77 # noqa: E501\nRPC_TO_HTTP = {\n StatusCode.OK: status.HTTP_200_OK,\n StatusCode.CANCELLED: status.HTTP_408_REQUEST_TIMEOUT,\n StatusCode.UNKNOWN: status.HTTP_500_INTERNAL_SERVER_ERROR,\n StatusCode.INVALID_ARGUMENT: status.HTTP_400_BAD_REQUEST,\n StatusCode.DEADLINE_EXCEEDED: status.HTTP_504_GATEWAY_TIMEOUT,\n StatusCode.NOT_FOUND: status.HTTP_404_NOT_FOUND,\n StatusCode.ALREADY_EXISTS: status.HTTP_409_CONFLICT,\n StatusCode.PERMISSION_DENIED: status.HTTP_403_FORBIDDEN,\n StatusCode.RESOURCE_EXHAUSTED: status.HTTP_429_TOO_MANY_REQUESTS, # may be translated to HTTP_413_REQUEST_ENTITY_TOO_LARGE instead, see below # noqa: E501\n StatusCode.FAILED_PRECONDITION: status.HTTP_400_BAD_REQUEST,\n StatusCode.ABORTED: status.HTTP_409_CONFLICT,\n StatusCode.OUT_OF_RANGE: status.HTTP_400_BAD_REQUEST,\n StatusCode.UNIMPLEMENTED: status.HTTP_501_NOT_IMPLEMENTED,\n StatusCode.INTERNAL: status.HTTP_500_INTERNAL_SERVER_ERROR,\n StatusCode.UNAVAILABLE: status.HTTP_503_SERVICE_UNAVAILABLE,\n StatusCode.DATA_LOSS: status.HTTP_500_INTERNAL_SERVER_ERROR,\n StatusCode.UNAUTHENTICATED: status.HTTP_401_UNAUTHORIZED,\n}\n\n\nclass OrcError(Exception):\n \"\"\"OrcError may be raised by the orchestrator API layer\"\"\"\n\n def __init__(self, *args: object) -> None:\n 
super().__init__(*args)\n self.details = \"\"\n self.code: Optional[StatusCode] = None\n\n @property\n def http_status(self):\n \"\"\"If the error has a gRPC code, returns the matching HTTP code.\n Otherwise, a generic internal server error is returned.\n \"\"\"\n if self.code:\n if \"message larger than max\" in str(self) and self.code == StatusCode.RESOURCE_EXHAUSTED:\n return status.HTTP_413_REQUEST_ENTITY_TOO_LARGE\n elif self.code in RPC_TO_HTTP:\n return RPC_TO_HTTP[self.code]\n\n return status.HTTP_500_INTERNAL_SERVER_ERROR\n", "repo_name": "Substra/substra-backend", "sub_path": "backend/orchestrator/error.py", "file_name": "error.py", "file_ext": "py", "file_size_in_byte": 2146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 57, "dataset": "github-code", "pt": "61", "api": [{"api_name": "grpc.StatusCode.OK", "line_number": 8, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 8, "usage_type": "name"}, {"api_name": "grpc.StatusCode.CANCELLED", "line_number": 9, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 9, "usage_type": "name"}, {"api_name": "grpc.StatusCode.UNKNOWN", "line_number": 10, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 10, "usage_type": "name"}, {"api_name": "grpc.StatusCode.INVALID_ARGUMENT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 11, "usage_type": "name"}, {"api_name": "grpc.StatusCode.DEADLINE_EXCEEDED", "line_number": 12, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 12, "usage_type": "name"}, {"api_name": "grpc.StatusCode.NOT_FOUND", "line_number": 13, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 13, "usage_type": "name"}, {"api_name": "grpc.StatusCode.ALREADY_EXISTS", "line_number": 14, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 14, "usage_type": "name"}, {"api_name": "grpc.StatusCode.PERMISSION_DENIED", "line_number": 15, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 15, "usage_type": "name"}, {"api_name": "grpc.StatusCode.RESOURCE_EXHAUSTED", "line_number": 16, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 16, "usage_type": "name"}, {"api_name": "grpc.StatusCode.FAILED_PRECONDITION", "line_number": 17, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 17, "usage_type": "name"}, {"api_name": "grpc.StatusCode.ABORTED", "line_number": 18, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 18, "usage_type": "name"}, {"api_name": "grpc.StatusCode.OUT_OF_RANGE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 19, "usage_type": "name"}, {"api_name": "grpc.StatusCode.UNIMPLEMENTED", "line_number": 20, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 20, "usage_type": "name"}, {"api_name": "grpc.StatusCode.INTERNAL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 21, "usage_type": "name"}, {"api_name": "grpc.StatusCode.UNAVAILABLE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 22, "usage_type": "name"}, {"api_name": "grpc.StatusCode.DATA_LOSS", "line_number": 23, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 23, "usage_type": "name"}, {"api_name": "grpc.StatusCode.UNAUTHENTICATED", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "grpc.StatusCode", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_408_REQUEST_TIMEOUT", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_504_GATEWAY_TIMEOUT", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_429_TOO_MANY_REQUESTS", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_501_NOT_IMPLEMENTED", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_503_SERVICE_UNAVAILABLE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 23, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 34, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 34, "usage_type": "name"}, {"api_name": "grpc.StatusCode.RESOURCE_EXHAUSTED", "line_number": 42, "usage_type": "attribute"}, {"api_name": 
"grpc.StatusCode", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_413_REQUEST_ENTITY_TOO_LARGE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "4570628943", "text": "# ------------------------------------------------------------------------\r\n# INTR\r\n# Copyright (c) 2023 Imageomics Paul. All Rights Reserved.\r\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\r\n# ------------------------------------------------------------------------\r\n# Copied from DETR (https://github.com/facebookresearch/detr)\r\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\r\n# ------------------------------------------------------------------------\r\n\r\nimport os, sys\r\nimport json\r\nimport time\r\nimport random\r\nimport datetime\r\nimport argparse\r\nimport torch\r\nfrom timm.models import create_model\r\nfrom util.losses import DistillationLoss\r\nfrom estimate_model import Predictor, Plot_ROC\r\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\r\nfrom torch.utils.data import DataLoader, DistributedSampler\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport datasets\r\nfrom timm.utils import NativeScaler\r\nimport util.utils as utils\r\nfrom models import build_model\r\nfrom datasets import build_dataset\r\nfrom util.engine import train_one_epoch, evaluate\r\n\r\n\r\ndef get_args_parser():\r\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\r\n parser.add_argument('--lr', default=3e-4, type=float)\r\n parser.add_argument('--lr_backbone', default=1.00e-5, type=float)\r\n parser.add_argument('--min_lr', default=1.00e-6, type=float)\r\n parser.add_argument('--weight_decay', default=1e-6, type=float)\r\n parser.add_argument('--batch_size', default=16, type=int)\r\n parser.add_argument('--pin_mem', default=True, type=bool)\r\n parser.add_argument('--epochs', default=140, type=int)\r\n parser.add_argument('--lr_drop', default=80, type=int)\r\n parser.add_argument('--lr_scheduler', default=\"StepLR\", type=str, choices=[\"StepLR\", \"CosineAnnealingLR\"])\r\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\r\n help='gradient clipping max norm')\r\n\r\n # * Backbone parameters\r\n parser.add_argument('--backbone', default='resnet50', type=str,\r\n help=\"Name of the convolutional backbone to use\")\r\n parser.add_argument('--dilation', action='store_true',\r\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\r\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\r\n help=\"Type of positional embedding to use on top of the image features\")\r\n\r\n # * Transformer parameters\r\n parser.add_argument('--enc_layers', default=6, type=int,\r\n help=\"Number of encoding layers in the transformer\")\r\n parser.add_argument('--dec_layers', default=6, type=int,\r\n help=\"Number of decoding layers in the transformer\")\r\n parser.add_argument('--dim_feedforward', default=2048, type=int,\r\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\r\n parser.add_argument('--hidden_dim', default=256, type=int,\r\n help=\"Size of the embeddings (dimension of the 
transformer)\")\r\n parser.add_argument('--dropout', default=0.1, type=float,\r\n help=\"Dropout applied in the transformer\")\r\n parser.add_argument('--nheads', default=8, type=int,\r\n help=\"Number of attention heads inside the transformer's attentions\")\r\n parser.add_argument('--num_queries', default=200, type=int,\r\n help=\"Number of query slots which equals to number of classes\")\r\n parser.add_argument('--pre_norm', action='store_true')\r\n\r\n # * Dataset parameters\r\n parser.add_argument('--dataset_name', default='flower', type=str)\r\n parser.add_argument('--input_size', default=224, type=int)\r\n parser.add_argument('--data_root', default='/mnt/d/flower_data', type=str)\r\n parser.add_argument('--nb_classes', default=5, type=int, help='number classes of your dataset')\r\n parser.add_argument('--output_dir', default='output',\r\n help='path where to save, empty for no saving')\r\n parser.add_argument('--output_sub_dir', default='output_sub')\r\n\r\n # Augmentation parameters\r\n parser.add_argument('--color-jitter', type=float, default=0.3, metavar='PCT',\r\n help='Color jitter factor (default: 0.3)')\r\n parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',\r\n help='Use AutoAugment policy. \"v0\" or \"original\". \" + \\\r\n \"(default: rand-m9-mstd0.5-inc1)'),\r\n parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')\r\n parser.add_argument('--train-interpolation', type=str, default='bicubic',\r\n help='Training interpolation (random, bilinear, bicubic default: \"bicubic\")')\r\n\r\n parser.add_argument('--repeated-aug', action='store_true')\r\n parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')\r\n parser.set_defaults(repeated_aug=True)\r\n\r\n parser.add_argument('--train-mode', action='store_true')\r\n parser.add_argument('--no-train-mode', action='store_false', dest='train_mode')\r\n parser.set_defaults(train_mode=True)\r\n\r\n parser.add_argument('--ThreeAugment', action='store_true') # 3augment\r\n\r\n parser.add_argument('--src', action='store_true') # simple random crop\r\n\r\n # * Random Erase params\r\n parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',\r\n help='Random erase prob (default: 0.25)')\r\n parser.add_argument('--remode', type=str, default='pixel',\r\n help='Random erase mode (default: \"pixel\")')\r\n parser.add_argument('--recount', type=int, default=1,\r\n help='Random erase count (default: 1)')\r\n parser.add_argument('--resplit', action='store_true', default=False,\r\n help='Do not random erase first (clean) augmentation split')\r\n\r\n # Distillation parameters distilled\r\n parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')\r\n parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',\r\n help='Name of teacher model to train (default: \"regnety_160\"')\r\n parser.add_argument('--teacher-path', type=str, default='')\r\n parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help=\"\")\r\n parser.add_argument('--distillation-alpha', default=0.5, type=float, help=\"\")\r\n parser.add_argument('--distillation-tau', default=1.0, type=float, help=\"\")\r\n\r\n\r\n # * Mixup params\r\n parser.add_argument('--mixup', type=float, default=0.8,\r\n help='mixup alpha, mixup enabled if > 0. 
(default: 0.8)')\r\n\r\n # * INTR parameters\r\n parser.add_argument('--noise_frac', default=0.1, type=float,\r\n help='fraction of noise to be added to new queries while loading pretrained model')\r\n # parser.add_argument('--rm_freeze', default=140, type=int, help='epoch at which the freezing at the encoder is removed')\r\n parser.add_argument('--test', default=\"val\", type=str, choices=[\"val\", \"test\"])\r\n parser.add_argument('--resume', default='', help='resume from checkpoint')\r\n parser.add_argument('--finetune', default='',\r\n help='finetune from pretrained checkpoint (COCO dataset trained for object detection task)')\r\n\r\n # * Device parameters\r\n parser.add_argument('--device', default='cuda',\r\n help='device to use for training / testing')\r\n parser.add_argument('--seed', default=42, type=int)\r\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\r\n help='start epoch')\r\n parser.add_argument('--eval', action='store_true')\r\n\r\n # * Distributed training parameters\r\n parser.add_argument('--num_workers', default=0, type=int)\r\n parser.add_argument('--world_size', default=1, type=int,\r\n help='number of distributed processes')\r\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\r\n return parser\r\n\r\n\r\ndef main(args):\r\n print(args)\r\n utils.init_distributed_mode(args)\r\n device = torch.device(args.device)\r\n\r\n # fix the seed for reproducibility\r\n seed = args.seed + utils.get_rank()\r\n torch.manual_seed(seed)\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n\r\n # model, criterion = build_model(args)\r\n model = build_model(args)\r\n model.to(device)\r\n model_without_ddp = model\r\n\r\n if args.distributed:\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\r\n ## for 2-phase training\r\n # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)\r\n model_without_ddp = model.module\r\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\r\n print('number of params:', n_parameters)\r\n\r\n param_dicts = [\r\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\r\n {\r\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\r\n \"lr\": args.lr_backbone,\r\n },\r\n ]\r\n\r\n loss_scaler = NativeScaler()\r\n\r\n linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0\r\n args.lr = linear_scaled_lr\r\n\r\n print('**************')\r\n print(f'Initial lr is {args.lr}')\r\n print('**************')\r\n\r\n criterion = LabelSmoothingCrossEntropy()\r\n\r\n if args.mixup > 0.:\r\n # smoothing is handled with mixup label transform\r\n criterion = SoftTargetCrossEntropy()\r\n elif args.smoothing:\r\n criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)\r\n else:\r\n criterion = torch.nn.CrossEntropyLoss()\r\n\r\n teacher_model = None\r\n\r\n if args.distillation_type != 'none':\r\n assert args.teacher_path, 'need to specify teacher-path when using distillation'\r\n print(f\"Creating teacher model: {args.teacher_model}\")\r\n teacher_model = create_model(\r\n args.teacher_model,\r\n pretrained=False,\r\n num_classes=args.nb_classes,\r\n global_pool='avg',\r\n )\r\n if args.teacher_path.startswith('https'):\r\n checkpoint = torch.hub.load_state_dict_from_url(\r\n args.teacher_path, map_location='cpu', check_hash=True)\r\n else:\r\n 
checkpoint = torch.load(args.teacher_path, map_location='cpu')\r\n teacher_model.load_state_dict(checkpoint['model'])\r\n teacher_model.to(args.device)\r\n teacher_model.eval()\r\n\r\n criterion = DistillationLoss(\r\n criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau\r\n )\r\n\r\n if args.lr_scheduler == \"StepLR\":\r\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\r\n weight_decay=args.weight_decay)\r\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\r\n\r\n if args.lr_scheduler == \"CosineAnnealingLR\":\r\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\r\n weight_decay=args.weight_decay)\r\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=args.min_lr)\r\n\r\n dataset_train, dataset_val = build_dataset(args)\r\n\r\n if args.distributed:\r\n sampler_train = DistributedSampler(dataset_train)\r\n sampler_val = DistributedSampler(dataset_val, shuffle=False)\r\n else:\r\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\r\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\r\n\r\n batch_sampler_train = torch.utils.data.BatchSampler(\r\n sampler_train, args.batch_size, drop_last=True)\r\n\r\n args.num_workers = args.num_workers if 'linux' in sys.platform else 0\r\n\r\n data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, pin_memory=args.pin_mem,\r\n collate_fn=dataset_train.collate_fn, num_workers=args.num_workers)\r\n data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, pin_memory=args.pin_mem,\r\n drop_last=False, collate_fn=dataset_train.collate_fn, num_workers=args.num_workers)\r\n\r\n # We create output directories to store results\r\n output_dir = Path(args.output_dir)\r\n if not os.path.exists(os.path.join(output_dir, args.dataset_name)):\r\n os.makedirs(os.path.join(output_dir, args.dataset_name), exist_ok=True)\r\n if not os.path.exists(os.path.join(output_dir, args.dataset_name, args.output_sub_dir)):\r\n os.makedirs(os.path.join(output_dir, args.dataset_name, args.output_sub_dir), exist_ok=True)\r\n\r\n max_accuracy = 0.0\r\n\r\n if args.resume:\r\n if args.resume.startswith('https'):\r\n checkpoint = torch.hub.load_state_dict_from_url(\r\n args.resume, map_location='cpu', check_hash=True)\r\n else:\r\n checkpoint = torch.load(args.resume, map_location='cpu')\r\n model_without_ddp.load_state_dict(checkpoint['model'])\r\n max_accuracy = checkpoint['best_score']\r\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\r\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\r\n args.start_epoch = checkpoint['epoch'] + 1\r\n\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n for state in optimizer.state.values():\r\n for k, v in state.items():\r\n if isinstance(v, torch.Tensor):\r\n state[k] = v.cuda()\r\n\r\n if args.eval:\r\n test_stats = evaluate(model, data_loader_val, device)\r\n if args.output_dir and utils.is_main_process():\r\n with (output_dir / args.dataset_name / args.output_sub_dir / \"log.txt\").open(\"a\") as f:\r\n f.write(json.dumps(test_stats) + \"\\n\")\r\n\r\n if args.finetune:\r\n if args.finetune.startswith('https'):\r\n checkpoint = torch.hub.load_state_dict_from_url(\r\n args.finetune, map_location='cpu', check_hash=True)\r\n else:\r\n checkpoint = torch.load(args.finetune, map_location='cpu')\r\n state_dict = checkpoint['model']\r\n state_dict = utils.load_model(args, 
state_dict)\r\n\r\n model_without_ddp.load_state_dict(state_dict)\r\n\r\n for param in model_without_ddp.parameters():\r\n param.requires_grad = True\r\n model_without_ddp.to(device)\r\n\r\n print(\"Start training\")\r\n start_time = time.time()\r\n\r\n for epoch in range(args.start_epoch, args.epochs):\r\n\r\n if args.distributed:\r\n sampler_train.set_epoch(epoch)\r\n train_stats = train_one_epoch(\r\n model, criterion, data_loader_train, optimizer, device, epoch,\r\n loss_scaler, args.clip_max_norm)\r\n\r\n lr_scheduler.step()\r\n # if args.output_dir:\r\n # checkpoint_paths = [output_dir / args.dataset_name / args.output_sub_dir / 'checkpoint.pth']\r\n #\r\n # if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) == args.epochs:\r\n # checkpoint_paths.append(\r\n # output_dir / args.dataset_name / args.output_sub_dir / f'checkpoint{epoch:04}.pth')\r\n # for checkpoint_path in checkpoint_paths:\r\n # utils.save_on_master({\r\n # 'model': model_without_ddp.state_dict(),\r\n # 'optimizer': optimizer.state_dict(),\r\n # 'lr_scheduler': lr_scheduler.state_dict(),\r\n # 'epoch': epoch,\r\n # 'args': args,\r\n # }, checkpoint_path)\r\n\r\n test_stats = evaluate(\r\n model, data_loader_val, device\r\n )\r\n print(f\"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%\")\r\n\r\n if max_accuracy < test_stats[\"acc1\"]:\r\n max_accuracy = test_stats[\"acc1\"]\r\n if args.output_dir:\r\n checkpoint_paths = [output_dir / 'best_checkpoint.pth']\r\n for checkpoint_path in checkpoint_paths:\r\n utils.save_on_master({\r\n 'model': model_without_ddp.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'lr_scheduler': lr_scheduler.state_dict(),\r\n 'epoch': epoch,\r\n 'best_score': max_accuracy,\r\n 'scaler': loss_scaler.state_dict(),\r\n 'args': args,\r\n }, checkpoint_path)\r\n\r\n print(f'Max accuracy: {max_accuracy:.2f}%')\r\n\r\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\r\n **{f'test_{k}': v for k, v in test_stats.items()},\r\n 'epoch': epoch,\r\n 'n_parameters': n_parameters}\r\n\r\n if args.output_dir and utils.is_main_process():\r\n with (output_dir / args.dataset_name / args.output_sub_dir / \"log.txt\").open(\"a\") as f:\r\n f.write(json.dumps(log_stats) + \"\\n\")\r\n\r\n total_time = time.time() - start_time\r\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\r\n print('Training time {}'.format(total_time_str))\r\n\r\n # plot ROC curve and confusion matrix\r\n # print('*******************STARTING PREDICT*******************')\r\n # Predictor(model_without_ddp, data_loader_val, args.resume, device)\r\n # Plot_ROC(model_without_ddp, data_loader_val, args.resume, device)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser('INTR training and evaluation script', parents=[get_args_parser()])\r\n args = parser.parse_args()\r\n if args.output_dir:\r\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\r\n main(args)\r\n", "repo_name": "jiaowoguanren0615/INTR-Pytorch", "sub_path": "INTR/train_gpu.py", "file_name": "train_gpu.py", "file_ext": "py", "file_size_in_byte": 18192, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "util.utils.init_distributed_mode", "line_number": 153, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 154, "usage_type": 
"call"}, {"api_name": "util.utils.get_rank", "line_number": 157, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.manual_seed", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 159, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 160, "usage_type": "call"}, {"api_name": "models.build_model", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "attribute"}, {"api_name": "timm.utils.NativeScaler", "line_number": 183, "usage_type": "call"}, {"api_name": "util.utils.get_world_size", "line_number": 185, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 185, "usage_type": "name"}, {"api_name": "timm.loss.LabelSmoothingCrossEntropy", "line_number": 192, "usage_type": "call"}, {"api_name": "timm.loss.SoftTargetCrossEntropy", "line_number": 196, "usage_type": "call"}, {"api_name": "timm.loss.LabelSmoothingCrossEntropy", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 200, "usage_type": "attribute"}, {"api_name": "timm.models.create_model", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.hub.load_state_dict_from_url", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 214, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 217, "usage_type": "call"}, {"api_name": "util.losses.DistillationLoss", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 227, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 229, "usage_type": "attribute"}, {"api_name": "torch.optim.AdamW", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 234, "usage_type": "attribute"}, {"api_name": "datasets.build_dataset", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.utils.data.DistributedSampler", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.utils.data.DistributedSampler", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 243, "usage_type": "attribute"}, {"api_name": "torch.utils.data.BatchSampler", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 245, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 248, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 252, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 256, "usage_type": "call"}, 
{"api_name": "os.path.exists", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "torch.hub.load_state_dict_from_url", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 266, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 279, "usage_type": "attribute"}, {"api_name": "util.engine.evaluate", "line_number": 283, "usage_type": "call"}, {"api_name": "util.utils.is_main_process", "line_number": 284, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 284, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.hub.load_state_dict_from_url", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 290, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 293, "usage_type": "call"}, {"api_name": "util.utils.load_model", "line_number": 295, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 295, "usage_type": "name"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "util.engine.train_one_epoch", "line_number": 310, "usage_type": "call"}, {"api_name": "util.engine.evaluate", "line_number": 330, "usage_type": "call"}, {"api_name": "util.utils.save_on_master", "line_number": 340, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 340, "usage_type": "name"}, {"api_name": "util.utils.is_main_process", "line_number": 357, "usage_type": "call"}, {"api_name": "util.utils", "line_number": 357, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 359, "usage_type": "call"}, {"api_name": "time.time", "line_number": 361, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 362, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 371, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 374, "usage_type": "call"}]} +{"seq_id": "43201417664", "text": "import sqlmodel\nfrom aiogram.types import Chat\nfrom sqlalchemy.exc import NoResultFound\n\nfrom yakusoku import sql\nfrom yakusoku.archive.models import GroupData\n\n\nclass GroupManager:\n def __init__(self) -> None:\n pass\n\n async def update_group_from_chat(self, chat: Chat) -> GroupData:\n try:\n data = await self.get_group(chat.id)\n data.update_from_chat(chat)\n except NoResultFound:\n data = GroupData.from_chat(chat)\n await self.update_group(data)\n return data\n\n async def update_group(self, group: GroupData) -> None:\n async with sql.session() as session:\n session.add(group)\n await session.commit()\n await session.refresh(group)\n\n async def get_group(self, id: int) -> GroupData:\n async with 
sql.session() as session:\n statement = sqlmodel.select(GroupData).where(GroupData.id == id)\n results = await session.execute(statement)\n return results.one()[0]\n\n async def get_groups(self) -> list[GroupData]:\n async with sql.session() as session:\n statement = sqlmodel.select(GroupData)\n results = await session.execute(statement)\n return [row[0] for row in results.all()]\n\n async def remove_group(self, id: int) -> None:\n async with sql.session() as session:\n statement = sqlmodel.select(GroupData).where(GroupData.id == id)\n results = await session.execute(statement)\n await session.delete(results.one()[0])\n await session.commit()\n\n async def add_member(self, group: int, member: int) -> None:\n data = await self.get_group(group)\n if member in data.members:\n return\n members = list(data.members)\n members.append(member)\n data.members = members\n await self.update_group(data)\n\n async def remove_member(self, group: int, member: int) -> None:\n data = await self.get_group(group)\n if member not in data.members:\n return\n members = list(data.members)\n members.remove(member)\n data.members = members\n await self.update_group(data)\n", "repo_name": "ricky8955555/yakusoku", "sub_path": "yakusoku/archive/group.py", "file_name": "group.py", "file_ext": "py", "file_size_in_byte": 2190, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "61", "api": [{"api_name": "aiogram.types.Chat", "line_number": 13, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.NoResultFound", "line_number": 17, "usage_type": "name"}, {"api_name": "yakusoku.archive.models.GroupData.from_chat", "line_number": 18, "usage_type": "call"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 18, "usage_type": "name"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 13, "usage_type": "name"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 22, "usage_type": "name"}, {"api_name": "yakusoku.sql.session", "line_number": 23, "usage_type": "call"}, {"api_name": "yakusoku.sql", "line_number": 23, "usage_type": "name"}, {"api_name": "yakusoku.sql.session", "line_number": 29, "usage_type": "call"}, {"api_name": "yakusoku.sql", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 30, "usage_type": "call"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 30, "usage_type": "argument"}, {"api_name": "yakusoku.archive.models.GroupData.id", "line_number": 30, "usage_type": "attribute"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 28, "usage_type": "name"}, {"api_name": "yakusoku.sql.session", "line_number": 35, "usage_type": "call"}, {"api_name": "yakusoku.sql", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 36, "usage_type": "call"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 36, "usage_type": "argument"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 34, "usage_type": "name"}, {"api_name": "yakusoku.sql.session", "line_number": 41, "usage_type": "call"}, {"api_name": "yakusoku.sql", "line_number": 41, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 42, "usage_type": "call"}, {"api_name": "yakusoku.archive.models.GroupData", "line_number": 42, "usage_type": "argument"}, {"api_name": "yakusoku.archive.models.GroupData.id", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "29624745566", "text": "import torch\r\nfrom torch import 
nn\r\nimport torchvision as vision\r\n\r\ndef conv3x3(in_, out):\r\n return nn.Conv2d(in_, out, 3, padding=1)\r\n\r\nclass ConvRelu(nn.Module):\r\n def __init__(self, in_, out):\r\n super().__init__()\r\n self.conv = conv3x3(in_, out)\r\n self.activation = nn.ReLU(inplace=True)\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.activation(x)\r\n return x\r\n\r\nclass DecoderBlockV2(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):\r\n super(DecoderBlockV2, self).__init__()\r\n self.in_channels = in_channels\r\n\r\n if is_deconv:\r\n \"\"\"\r\n Parameters for the deconvolution were chosen to avoid artifacts, following\r\n the link https://distill.pub/2016/deconv-checkerboard/\r\n \"\"\"\r\n\r\n self.block = nn.Sequential(\r\n ConvRelu(in_channels, middle_channels),\r\n nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,\r\n padding=1),\r\n nn.ReLU(inplace=True)\r\n )\r\n else:\r\n self.block = nn.Sequential(\r\n nn.Upsample(scale_factor=2, mode='bilinear'),\r\n ConvRelu(in_channels, middle_channels),\r\n ConvRelu(middle_channels, out_channels),\r\n )\r\n\r\n def forward(self, x):\r\n return self.block(x)\r\n\r\nclass AlbuNet(nn.Module):\r\n \"\"\"\r\n UNet (https://arxiv.org/abs/1505.04597) with Resnet34 (https://arxiv.org/abs/1512.03385) encoder\r\n Proposed by Alexander Buslaev: https://www.linkedin.com/in/al-buslaev/\r\n \"\"\"\r\n\r\n def __init__(self, num_filters=32, pretrained=False, is_deconv=False):\r\n \"\"\"\r\n :param num_classes:\r\n :param num_filters:\r\n :param pretrained:\r\n False - no pre-trained network is used\r\n True - encoder is pre-trained with resnet34\r\n :param is_deconv:\r\n False: bilinear interpolation is used in decoder\r\n True: deconvolution is used in decoder\r\n \"\"\"\r\n super().__init__()\r\n# self.num_classes = num_classes\r\n\r\n self.pool = nn.MaxPool2d(2, 2)\r\n\r\n self.encoder = vision.models.resnet34(pretrained=pretrained)\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n self.conv1 = nn.Sequential(self.encoder.conv1,\r\n self.encoder.bn1,\r\n self.encoder.relu,\r\n self.pool)\r\n\r\n self.conv2 = self.encoder.layer1\r\n\r\n self.conv3 = self.encoder.layer2\r\n\r\n self.conv4 = self.encoder.layer3\r\n\r\n self.conv5 = self.encoder.layer4\r\n \r\n self.conv6 = nn.Sequential(\r\n nn.Conv2d(512, 256, kernel_size=3, stride=2, padding=1),\r\n nn.LeakyReLU(inplace=True))\r\n\r\n self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)\r\n\r\n self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\r\n self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\r\n self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)\r\n self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)\r\n# self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)\r\n# self.dec0 = ConvRelu(num_filters, num_filters)\r\n# self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)\r\n\r\n def forward(self, x):\r\n conv1 = self.conv1(x)\r\n conv2 = self.conv2(conv1)\r\n conv3 = self.conv3(conv2)\r\n conv4 = self.conv4(conv3)\r\n conv5 = self.conv5(conv4)\r\n\r\n # center = self.center(self.conv6(conv5))\r\n # dec5 = self.dec5(torch.cat([center, conv5], 1))\r\n \r\n out = self.conv6(conv5)\r\n return out\r\n\r\n# dec4 = self.dec4(torch.cat([dec5, conv4], 1))\r\n# dec3 = 
self.dec3(torch.cat([dec4, conv3], 1))\r\n# dec2 = self.dec2(torch.cat([dec3, conv2], 1))\r\n# dec1 = self.dec1(dec2)\r\n# dec0 = self.dec0(dec1)\r\n\r\n# if self.num_classes > 1:\r\n# x_out = F.log_softmax(self.final(dec0), dim=1)\r\n# else:\r\n# x_out = self.final(dec0)\r\n # return dec5\r\n\r\n\r\nclass RNN_Decoder(nn.Module):\r\n def __init__(self, samples_size, input_size, hidden_size, linear_output_size, decode_times):\r\n super().__init__()\r\n self.samples_size = samples_size\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.times = decode_times\r\n self.output_size = linear_output_size\r\n \r\n self.lstm = nn.LSTMCell(self.input_size, self.hidden_size)\r\n# self.leaky1 = nn.LeakyReLU()\r\n# self.bn1 = nn.BatchNorm1d(self.hidden_size)\r\n# self.dropout1 = nn.Dropout(0.1)\r\n# self.linear1 = nn.Linear(self.hidden_size, 256)\r\n \r\n# self.leaky2 = nn.LeakyReLU()\r\n# self.bn2 = nn.BatchNorm1d(256)\r\n# self.dropout2 = nn.Dropout(0.1)\r\n# self.linear2 = nn.Linear(256, self.output_size)\r\n self.leaky1 = nn.LeakyReLU()\r\n# self.bn1 = nn.BatchNorm1d(self.hidden_size)\r\n self.dropout1 = nn.Dropout(0.1)\r\n self.linear1 = nn.Linear(self.hidden_size, linear_output_size)\r\n\r\n def set_decode_times(self, times):\r\n self.times = times\r\n\r\n def forward(self, x, hidden):\r\n \"\"\"x : BSx256x40x40\"\"\"\r\n hx, cx = hidden\r\n bs, c, h, w = x.shape\r\n x = x.permute(0, 2, 3, 1)\r\n # x: (BSx40x40)x256\r\n x = x.reshape(-1, x.shape[-1])\r\n \r\n output = []\r\n for i in range(self.times):\r\n hx, cx = self.lstm(x, [hx, cx])\r\n output.append(hx.unsqueeze(0))\r\n \r\n # linear_input: timesx(BSx40x40)xhidden_size\r\n linear_input = torch.cat(output, dim=0)\r\n # (timesx(BSx40x40))xhidden_size\r\n x = linear_input.reshape(-1, linear_input.shape[-1])\r\n x = self.dropout1(x)\r\n x = self.leaky1(x)\r\n# x = self.bn1(x)\r\n x = self.linear1(x)\r\n# x = self.dropout2(x)\r\n# x = self.leaky2(x)\r\n# x = self.bn2(x)\r\n# x = self.linear2(x)\r\n # TODO: x: (timesx(BSx40x40))x85 -> timesxBSx40x40x85 -> BSx40x40xtimesx85\r\n x = x.reshape(self.times, bs, h, w, -1)\r\n # BSx40x40xtimesx85\r\n x = x.permute(1, 2, 3, 0, 4)\r\n# x = x.reshape(bs, h*w*self.times, -1)\r\n return x\r\n \r\n def init_hidden_state(self, sample_size):\r\n self.samples_size = sample_size\r\n return torch.zeros(self.samples_size, self.hidden_size), torch.zeros(self.samples_size, self.hidden_size)\r\n\r\n\r\nclass DetectNet(nn.Module):\r\n def __init__(self, rpn_model, detect_model, scale=1):\r\n super().__init__()\r\n self.rpn = rpn_model\r\n self.detect_model = detect_model\r\n self.scale = scale\r\n \r\n def forward(self, x, hidden):\r\n rpn_output = self.rpn(x)\r\n detect_output = self.detect_model(rpn_output*self.scale, hidden)\r\n return detect_output\r\n \r\n def init_rnn_state(self, sample_size):\r\n return self.detect_model.init_hidden_state(sample_size)\r\n", "repo_name": "ababycat/playground", "sub_path": "detection/detect/architecture.py", "file_name": "architecture.py", "file_ext": "py", "file_size_in_byte": 7397, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Conv2d", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 12, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torchvision.models.resnet34", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.LSTMCell", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}]} +{"seq_id": "7843834459", "text": "#!/usr/bin/env python3\n\nfrom threading import Thread, Timer\nimport time\nimport requests\nimport os\nimport shelve\n\nfrom .statemanager import SystemState\nfrom .looptimer import LoopTimer\n\n\nclass TelegramAuthenticateBot:\n\n \"\"\"This is a very simple authentication bot like WeChat. The user scans a\n QRCode containing a token. 
The token, passed as an init parameter, will be sent via the\n user's Telegram to this bot, which verifies that the token is the one we\n issued and that the user is someone we know. If everything works, the bot\n excites the background server, and the server tells all connected clients\n about that incident.\n\n Although Telegram does provide a much simpler process (a login widget on a web\n page), there are several reasons to prefer this method:\n \n 1. Web login stores a cookie, which is used to simplify later logins.\n BUT for our application it is preferable to ask the user to\n authenticate AGAIN, since that produces a notification on the user's\n phone, which is a signal that the system is being excited. Telegram doesn't\n provide a reliable way to revoke a user's token, making that process\n a bit insecure.\n 2. The Telegram JavaScript for the above login contains \"eval\", which\n is unwelcome when we want to apply a Content Security Policy.\n 3. By forcing the user to contact our Bot first, we can actively notify the\n user of further logins. (Web login by Telegram may in principle offer the\n same feature, but it currently does not work.)\n\n This authentication bot method differs from WeChat's in that\n there is no real distinction between individual clients: we just want to make\n sure the system is excited; \"by whom?\" is not important. THEREFORE we ask all\n users to scan the same token. In any case, only the users preconfigured in\n YAML can actually excite the system.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Sends information about system status via Telegram. This class\n checks the system status every minute, and reminds the user\n of excited status every 5 minutes after the first excitation.\"\"\"\n self.cachePath = config.workdir(\"telegram.cache\")\n self.config = config.idProviders[\"telegram\"]\n\n self.sending = False\n self.apiURL = \\\n lambda i: \"https://api.telegram.org/bot%s/%s\" % (\n self.config[\"token\"], i)\n self.__pollNext = False\n self.__lastUpdateID = 0\n self.__outgoingQueue = []\n self.__outgoingQueuePurger = LoopTimer(\n self.__purgeSendingQueue, interval=1)\n\n self.__recognizedTokens = []\n self.__tokenRotater = LoopTimer(self.__rotateToken, interval=30)\n\n self.onTokenVerified = lambda: None \n self.statemanager = None\n\n self.lastRemindState = SystemState.UNKNOWN \n self.__stateReminder = LoopTimer(self.__remindState, interval=30)\n self.__excitedReminder = LoopTimer(self.__remindExcited, interval=300)\n self.__groundReminder = LoopTimer(self.__remindGround, interval=21600)\n\n\n def __rotateToken(self, purge=False):\n \"\"\"Generates a new token for login and revokes an old one.\"\"\"\n count = 1 if not purge else 2\n for i in range(0, count):\n self.__recognizedTokens.append(os.urandom(16).hex())\n if len(self.__recognizedTokens) > 2:\n self.__recognizedTokens = self.__recognizedTokens[-2:]\n\n @property\n def token(self):\n return self.__recognizedTokens[-1]\n\n def __composeMessage(self, receiverChatID, message):\n self.__outgoingQueue.append({\n \"chat_id\": receiverChatID,\n \"text\": message,\n })\n\n def __purgeSendingQueue(self):\n url = self.apiURL(\"sendMessage\")\n while self.__outgoingQueue:\n message = self.__outgoingQueue.pop(0)\n for i in range(0, 10):\n try:\n req = requests.post(url, data=message)\n result = req.json()\n assert result[\"ok\"]\n break\n except Exception as e:\n print(\"Failed sending a message: %s\" % e)\n time.sleep(5)\n\n def __processUpdateMessage(self, message):\n \"\"\"\n {'message_id': 2, 'from': {'id': *****, 'is_bot': 
False,\n 'first_name': 'NeoAtlantis', 'username': 'NeoAtlantis',\n 'language_code': 'de-DE'}, 'chat': {'id': ****, 'first_name':\n 'NeoAtlantis', 'username': 'NeoAtlantis', 'type': 'private'}, 'date':\n ****, 'text': 'hi'}\"\"\"\n msgFrom = message[\"from\"]\n msgChat = message[\"chat\"]\n if msgFrom[\"username\"] not in self.config[\"users\"]:\n return\n else:\n self.recentChats[msgFrom[\"username\"]] = msgChat[\"id\"]\n if msgFrom[\"is_bot\"]: return\n if msgChat[\"type\"] != \"private\": return\n if \"text\" not in message: return\n if \"date\" not in message: return\n text, date = message[\"text\"], message[\"date\"]\n if text.startswith(\"/start\"):\n token = text[6:].strip()\n if token and token in self.__recognizedTokens:\n print(\"Token is valid.\")\n self.onTokenVerified()\n self.__composeMessage(msgChat[\"id\"], \"Your token is valid.\")\n self.__rotateToken(purge=True)\n else:\n self.__composeMessage(msgChat[\"id\"], \"Token invalid. Try again.\")\n\n def __pollUpdate(self):\n if not self.__pollNext: return\n interval = 0\n\n updates = []\n try:\n url = self.apiURL(\"getUpdates\")\n req = requests.get(\n url,\n data={\"offset\": self.__lastUpdateID+1},\n timeout=5\n )\n assert req.status_code == 200\n json = req.json()\n assert json[\"ok\"]\n updates = json[\"result\"]\n except Exception as e:\n print(\"Failed pulling updates from telegram. Wait 5 seconds...\")\n print(e)\n interval = 5\n\n for update in updates:\n try:\n update_id = update[\"update_id\"]\n message = update[\"message\"]\n self.__lastUpdateID = max(update_id, self.__lastUpdateID)\n self.__processUpdateMessage(message)\n except Exception as e:\n print(e)\n \n if self.__pollNext:\n self.pollThread = Timer(interval, self.__pollUpdate)\n self.pollThread.start()\n\n\n def __remindState(self):\n \"\"\"Trigger state reminding service.\"\"\"\n if not self.statemanager: return\n newState = self.statemanager.reportState()\n if newState == self.lastRemindState: return\n \n if newState == SystemState.EXCITED:\n self.__excitedReminder.start()\n self.__groundReminder.stop()\n elif newState == SystemState.GROUND:\n self.__excitedReminder.stop()\n self.__groundReminder.start()\n elif newState == SystemState.UNKNOWN:\n self.__excitedReminder.stop()\n self.__groundReminder.stop()\n else:\n for each in self.recentChats:\n self.__composeMessage(\n self.recentChats[each], \n \"System decayed!\"\n )\n self.lastRemindState = newState\n\n def __remindExcited(self):\n for each in self.recentChats:\n self.__composeMessage(\n self.recentChats[each], \n \"System is excited!\"\n )\n\n def __remindGround(self):\n for each in self.recentChats:\n self.__composeMessage(\n self.recentChats[each], \n \"System is in ground state.\"\n )\n\n \n \n\n def __enter__(self, *args):\n self.recentChats = shelve.open(self.cachePath, writeback=True)\n self.__stateReminder.start()\n self.__outgoingQueuePurger.start()\n self.__tokenRotater.start()\n self.__pollNext = True\n self.pollThread = Thread(target=self.__pollUpdate)\n self.pollThread.start()\n return self\n\n def __exit__(self, *args):\n self.recentChats.close()\n self.__stateReminder.stop()\n self.__outgoingQueuePurger.stop()\n self.__tokenRotater.stop()\n self.__pollNext = False\n", "repo_name": "neoatlantis-security-solutions/vnmk", "sub_path": "vnmk/server/telegram.py", "file_name": "telegram.py", "file_ext": "py", "file_size_in_byte": 8400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "61", "api": [{"api_name": "looptimer.LoopTimer", 
"line_number": 58, "usage_type": "call"}, {"api_name": "looptimer.LoopTimer", "line_number": 62, "usage_type": "call"}, {"api_name": "statemanager.SystemState.UNKNOWN", "line_number": 67, "usage_type": "attribute"}, {"api_name": "statemanager.SystemState", "line_number": 67, "usage_type": "name"}, {"api_name": "looptimer.LoopTimer", "line_number": 68, "usage_type": "call"}, {"api_name": "looptimer.LoopTimer", "line_number": 69, "usage_type": "call"}, {"api_name": "looptimer.LoopTimer", "line_number": 70, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 77, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 97, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 140, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 164, "usage_type": "call"}, {"api_name": "statemanager.SystemState.EXCITED", "line_number": 174, "usage_type": "attribute"}, {"api_name": "statemanager.SystemState", "line_number": 174, "usage_type": "name"}, {"api_name": "statemanager.SystemState.GROUND", "line_number": 177, "usage_type": "attribute"}, {"api_name": "statemanager.SystemState", "line_number": 177, "usage_type": "name"}, {"api_name": "statemanager.SystemState.UNKNOWN", "line_number": 180, "usage_type": "attribute"}, {"api_name": "statemanager.SystemState", "line_number": 180, "usage_type": "name"}, {"api_name": "shelve.open", "line_number": 209, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "35225091537", "text": "from urllib import request\nfrom flask_app import app\nfrom flask import render_template, redirect, flash, session, request\nfrom flask_app.models.appointment import Appointment\nfrom flask_app.models.user import User\n\n@app.route('/appointments')\ndef myappointments():\n if 'user_id' not in session:\n return redirect('/logout')\n data = {\"id\":session['user_id']}\n user = User.get_by_id(data)\n # all_appointments = Appointment.get_all()\n # all_appointments = Appointment.get_all_where_user_is(data)\n all_appointments = Appointment.get_pending_where_user_is(data)\n all_past_appointments = Appointment.get_all_past_where_user_is(data)\n return render_template(\"appointments.html\", user=user, all_appointments=all_appointments, all_past=all_past_appointments)\n\n@app.route(\"/appointments/add\")\ndef appointments_form():\n if 'user_id' not in session:\n return redirect('/logout')\n data = {\"id\":session['user_id']}\n return render_template(\"create_appointment.html\", user=User.get_by_id(data))\n\n@app.route(\"/create-appointment\", methods = [\"POST\"])\ndef create_appointment():\n if 'user_id' not in session:\n return redirect('/logout')\n if not Appointment.validate_register(request.form):\n return redirect('/appointments/add')\n data = {\n \"task\":request.form[\"task\"],\n \"date\":request.form[\"date\"],\n \"status\":request.form[\"status\"],\n \"user_id\": session[\"user_id\"]\n }\n print(request.form)\n print(\"Fechas\", data['date'])\n Appointment.save(data)\n return redirect('/appointments')\n\n@app.route('/appointments/delete/')\ndef delete_appointment(id):\n Appointment.delete_by_id({\"id\":id})\n return redirect('/appointments')\n\n@app.route('/appointments/edit/')\ndef render_update_form(id):\n if 'user_id' not in session:\n return redirect('/logout')\n all_about_appointment = Appointment.get_by_id({\"id\":id})\n session['appointment_id'] = id\n return 
render_template(\"edit_appointment.html\", this=all_about_appointment, user = User.get_by_id({\"id\":session['user_id']}))\n\n\n@app.route('/edit-appointment', methods=['POST'])\ndef update_appointment():\n if 'user_id' not in session:\n return redirect('/logout')\n appointment_id = session['appointment_id']\n if not Appointment.validate_register(request.form):\n return redirect(f'/appointments/edit/{appointment_id}')\n data = {\n \"id\": appointment_id,\n \"task\":request.form[\"task\"],\n \"date\":request.form[\"date\"],\n \"status\":request.form[\"status\"],\n \"user_id\": session[\"user_id\"]\n }\n Appointment.update(data)\n return redirect('/appointments')\n", "repo_name": "LeninGF/appointments_app", "sub_path": "flask_app/controllers/appointments.py", "file_name": "appointments.py", "file_ext": "py", "file_size_in_byte": 2664, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.session", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_app.models.user.User.get_by_id", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 12, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.get_pending_where_user_is", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 15, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.get_all_past_where_user_is", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_app.models.user.User.get_by_id", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 24, "usage_type": "name"}, {"api_name": "flask_app.app.route", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment.validate_register", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": 
"name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.save", "line_number": 40, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.delete_by_id", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 43, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment.get_by_id", "line_number": 52, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "flask_app.models.user.User.get_by_id", "line_number": 54, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask_app.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 61, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.validate_register", "line_number": 62, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 69, "usage_type": "name"}, {"api_name": "flask_app.models.appointment.Appointment.update", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_app.models.appointment.Appointment", "line_number": 71, "usage_type": "name"}, 
{"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 57, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "27652413484", "text": "import os\n\nimport keras\nimport numpy\nfrom keras import Sequential\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense\n\nimport hybrid_output as log\nfrom training.data import prepare_training_sets\n\n\n\nclass NeuralNetwork:\n def __init__(self):\n self.model = Sequential()\n self.prepare_network()\n\n def load_weights_if_they_exist(self):\n if os.path.exists(\"weights.HDF5\"):\n self.model = keras.models.load_model(\"weights.HDF5\")\n\n def prepare_network(self):\n self.model.add(Conv2D(32, 3, input_shape=(20, 10, 1)))\n self.model.add(Activation('relu'))\n self.model.add(Conv2D(32, 3))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=2))\n self.model.add(Flatten())\n self.model.add(Dense(units=256, activation='relu'))\n self.model.add(Dense(units=64, activation='relu'))\n self.model.add(Dense(units=1, activation='linear'))\n self.model.compile(loss='mean_squared_logarithmic_error',\n optimizer='RMSProp')\n self.load_weights_if_they_exist()\n\n def train(self, episodes):\n x_batch, y_batch = episodes.unroll()\n log.debug(\"Doing training against \" + str(len(x_batch)) + \" items.\")\n x_train, y_train, x_test, y_test = prepare_training_sets(x_batch, y_batch)\n training_result = self.model.fit(x_train, y_train)\n log.out(\"error = \", training_result.history[\"loss\"][0])\n network_evaluation = self.model.evaluate(x_test, y_test)\n log.out(\"evaluation error = \", network_evaluation)\n self.model.save(\"weights.HDF5\")\n\n def evaluate(self, game):\n activations = game.flatten()\n activations = numpy.expand_dims(activations, axis=0)\n prediction = self.model.predict(activations)\n return prediction\n", "repo_name": "cohen990/py-tetris", "sub_path": "training/neural_network.py", "file_name": "neural_network.py", "file_ext": "py", "file_size_in_byte": 1851, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.Sequential", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "keras.models.load_model", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 20, "usage_type": "attribute"}, {"api_name": "keras.layers.Conv2D", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "hybrid_output.debug", "line_number": 38, "usage_type": "call"}, {"api_name": "training.data.prepare_training_sets", "line_number": 39, "usage_type": "call"}, {"api_name": "hybrid_output.out", "line_number": 41, "usage_type": 
"call"}, {"api_name": "hybrid_output.out", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "23369815061", "text": "import torch\nimport sys\nimport pickle\nimport numpy as np\n\nsys.path.append(\"MTR/tools\")\n\nfrom train import parse_config\n\nfrom mtr.models import model as model\nfrom mtr.utils import common_utils\nfrom mtr.datasets import build_dataloader\n\n# --cfg_file MTR/tools/cfgs/waymo/mtr_single_file_vis.yaml\n\n# checkpoint_path = \"MTR\\output\\tools\\cfgs\\waymo\\mtr_weak\\my_first_exp\"\ncheckpoint_path = \"../checkpoint_epoch_1.pth\"\ndata_path = \"../data\"\ndata_filename = \"sample_867dd000677d389.pkl\"\n\ndef load_model(checkpoint_path=checkpoint_path):\n # checkpoint = torch.load(checkpoint_path)\n args, cfg = parse_config()\n logger = common_utils.create_logger(\"log.txt\", rank=cfg.LOCAL_RANK) # create logger\n model_ = model.MotionTransformer(config=cfg.MODEL)\n model_.load_params_from_file(checkpoint_path, logger=logger, to_cpu=False)\n model_.eval()\n return model_\n\ndef dataloader():\n args, cfg = parse_config()\n logger = common_utils.create_logger(\"log_vis.txt\", rank=cfg.LOCAL_RANK) # create logger\n dataset, dataloader_, sampler = build_dataloader(\n dataset_cfg=cfg.DATA_CONFIG,\n batch_size=cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU,\n dist=False, workers=1, logger=logger,\n training=False, merge_all_iters_to_one_epoch=False, total_epochs=cfg.OPTIMIZATION.NUM_EPOCHS,\n add_worker_init_fn=True\n )\n return dataset, dataloader_, sampler\n\ndef create_pickle(filename):\n data_file = open(data_path + \"/processed_scenarios_training/\" + filename, 'rb')\n data = pickle.load(data_file)\n data_file.close()\n # create a dict with these keys 'scenario_id', 'timestamps_seconds', 'current_time_index', 'sdc_track_index', 'objects_of_interest', 'tracks_to_predict'\n # and values from data\n dict = {}\n dict['scenario_id'] = data['scenario_id']\n dict['timestamps_seconds'] = data['timestamps_seconds']\n dict['current_time_index'] = data['current_time_index']\n dict['sdc_track_index'] = data['sdc_track_index']\n dict['objects_of_interest'] = data['objects_of_interest']\n dict['tracks_to_predict'] = data['tracks_to_predict']\n list = [dict]\n pickle.dump(list, open(data_path + \"/processed_scenarios_single_file_vis.pkl\", 'wb'))\n\n\n\ndef apply_model(model_):\n dataset, dataloader_, sampler = dataloader()\n i, batch_dict = next(enumerate(dataloader_))\n with torch.no_grad():\n batch_pred_dicts = model_(batch_dict)\n final_pred_dicts = dataset.generate_prediction_dicts(batch_pred_dicts, output_path=None)\n return final_pred_dicts\n\ndef apply(data_filename=data_filename, checkpoint_path=checkpoint_path):\n model_ = load_model(checkpoint_path=checkpoint_path).cuda()\n create_pickle(data_filename)\n result = apply_model(model_)\n return result", "repo_name": "tudoroancea/civil_459_project", "sub_path": "apply_model.py", "file_name": "apply_model.py", "file_ext": "py", "file_size_in_byte": 2745, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "train.parse_config", "line_number": 23, "usage_type": "call"}, {"api_name": "mtr.utils.common_utils.create_logger", "line_number": 24, "usage_type": "call"}, {"api_name": "mtr.utils.common_utils", "line_number": 24, "usage_type": "name"}, 
{"api_name": "mtr.models.model.MotionTransformer", "line_number": 25, "usage_type": "call"}, {"api_name": "mtr.models.model", "line_number": 25, "usage_type": "name"}, {"api_name": "train.parse_config", "line_number": 31, "usage_type": "call"}, {"api_name": "mtr.utils.common_utils.create_logger", "line_number": 32, "usage_type": "call"}, {"api_name": "mtr.utils.common_utils", "line_number": 32, "usage_type": "name"}, {"api_name": "mtr.datasets.build_dataloader", "line_number": 33, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 44, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "10899155354", "text": "from PIL import Image, ImageDraw\n\nimage = Image.open(\"/4 semestr/comp_grafic/Lab_1_Jordan_Method _1.1/files/roof.JPG\")\ndraw = ImageDraw.Draw(image)\nwidth = image.size[0]\nheight = image.size[1]\nfor x in range(80, width, 160):\n for y in range(height):\n for x1 in range(x,x+80):\n draw.point((x1,y),(0,255,128))\nimage.show()\n#image.save(\"first.jpg\")\ndel draw\n", "repo_name": "Saykon-k/pit", "sub_path": "4_semester/comp_grafic/labS/labS_pil/1.1_praktika/1_task.py", "file_name": "1_task.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.Image.open", "line_number": 3, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 3, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 4, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "12265446639", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 20 12:53:19 2023\n\n@author: EvgenyGalimov\n\"\"\"\n\n\n\nimport pandas as pd\nfrom datetime import datetime\nimport datetime\nfrom fbprophet import Prophet\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\nfrom sklearn.metrics import mean_squared_error, mean_absolute_percentage_error, mean_absolute_error\nfrom fbprophet.diagnostics import cross_validation\nimport os\nimport shutil\n\n# set max string display - to convert pandas series to string\npd.set_option(\"display.max_colwidth\", 10000)\n\n# set path\npath = 'C:/Users/EvgenyGalimov/OneDrive - Imperial College Health Partners/Documents/Docs/22_Endoscopy_forcasting_demand/'\n\n# load the file with parameters\nparams = pd.read_excel(path+'params2_5y_Prophet_2020oct.xlsx', sheet_name = 'params')\n\n\n\n\n\n\n# Master function to perform prophet analysis and save the results\ndef run_prophet(path, folder, file, start_w, end_w, save_folder, test_data_points, forecast_period):\n # load the data\n data = pd.read_csv(path+folder+file+'.csv')\n\n # convert string to Timestamp and rename columns\n data['week_first_day']=pd.DatetimeIndex(data['week_first_day'])\n data=data.rename(columns={'week_first_day':'ds','n_visits':'y'})\n \n # copy data and limit start and end as in params table\n data2 = data[['ds','y']]\n data2_pc = data2[ (data2['ds']>=start_w) & (data2['ds']<=end_w) ]\n \n \n # plot original data and save it\n data2_pc['ds'] = pd.to_datetime(data2_pc['ds'])\n ax=data2_pc.set_index('ds').plot(figsize=(12,8))\n ax.set_ylabel('Weekly Number')\n ax.set_xlabel('Date')\n plt.savefig( path+save_folder+'___'+'1_original data.jpg', dpi=300, transparent = True, format='jpg')\n\n \n # split data into train and test\n data2_pc.shape[0]\n train_pc = 
data2_pc.iloc[0:data2_pc.shape[0]-test_data_points,:]\n test_pc = data2_pc.iloc[data2_pc.shape[0]-test_data_points:data2_pc.shape[0],:]\n\n print(f\"Number of weeks in train data: {len(train_pc)}\")\n print(f\"Number of weeks in test data: {len(test_pc)}\")\n \n \n ### with train/test split \n # train Prophet model\n p_mod_4=Prophet(interval_width=0.95)\n p_mod_4.fit(train_pc)\n \n # merge observed and forecast\n test_dates_pc = test_pc[['ds']]\n forecast4=p_mod_4.predict(test_dates_pc)\n forecast4[['ds','yhat','yhat_lower','yhat_upper']].tail()\n forecast4 = pd.merge(forecast4, data2_pc, how=\"left\", on=[\"ds\"])\n \n # get forecast metrics on the test period\n p_mod_4_test_metrics = pd.DataFrame({\n \"MSE\": [round( mean_squared_error(forecast4['yhat'], forecast4['y']), 0)],\n \"RMSE\": [round( mean_squared_error(forecast4['yhat'], forecast4['y'], squared = False), 0)],\n \"MAPE\": [round( mean_absolute_percentage_error(forecast4['yhat'], forecast4['y']), 2)],\n \"MAE\": [round( mean_absolute_error(forecast4['yhat'], forecast4['y']), 0)] })\n p_mod_4_test_metrics.to_csv(path+save_folder+'___'+'2_model1_metrics.csv')\n \n \n # train Prophet and predict using crossvalidation method\n p_mod_5=Prophet(interval_width=0.95)\n p_mod_5.fit(data2_pc)\n df_cv2 = cross_validation(p_mod_5, initial=str(len(train_pc)-1)+' W', period=str(test_data_points)+' W', horizon = str(test_data_points)+' W')\n \n p_mod_5.plot(df_cv2, uncertainty=True)\n plt.xticks(rotation=30)\n plt.xlabel(\"Date\")\n plt.ylabel(\"Estimated visits per week\")\n plt.text(df_cv2.iloc[0,0],df_cv2['y'].max(), \"MAE: \"+str(round( mean_absolute_error(df_cv2['yhat'], df_cv2['y']), 0)), fontsize = 15)\n plt.savefig( path+save_folder+'___'+'3_model2_cv.jpg', dpi=300, transparent = True, format='jpg')\n\n # get forecast metrics on the test period\n p_mod_5_test_metrics = pd.DataFrame({\n \"MSE\": [round( mean_squared_error(df_cv2['yhat'], df_cv2['y']), 0)],\n \"RMSE\": [round( mean_squared_error(df_cv2['yhat'], df_cv2['y'], squared = False), 0)],\n \"MAPE\": [round( mean_absolute_percentage_error(df_cv2['yhat'], df_cv2['y']), 2)],\n \"MAE\": [round( mean_absolute_error(df_cv2['yhat'], df_cv2['y']), 0)] })\n p_mod_5_test_metrics.to_csv(path+save_folder+'___'+'4_model2_metrics.csv')\n\n \n # make forecast for the next 5 years\n future_dates2 = p_mod_5.make_future_dataframe(periods=forecast_period,freq='MS')\n forecast5=p_mod_5.predict(future_dates2)\n forecast5 = pd.merge(forecast5, data2_pc, how=\"left\", on=[\"ds\"])\n\n # plot the forecast\n forecast5[['ds','yhat','yhat_lower','yhat_upper']].tail()\n p_mod_5.plot(forecast5, uncertainty=True)\n plt.text(df_cv2.iloc[0,0],df_cv2['y'].max(), \"MAE: \"+str(round( mean_absolute_error(df_cv2['yhat'], df_cv2['y']), 0)), fontsize = 15)\n plt.savefig( path+save_folder+'___'+'5_model2_forecast.jpg', dpi=300, transparent = True, format='jpg')\n \n # plot component plot \n p_mod_5.plot_components(forecast5)\n plt.xticks(rotation=30)\n plt.savefig( path+save_folder+'___'+'6_model2_component_plot.jpg', dpi=300, transparent = True, format='jpg')\n\n # save the forecast table\n forecast5.to_csv(path+save_folder+'___'+'7_model2_forecast.csv')\n\n\n\n# error list\nerror_list = []\n\n# iterate over all sites/procedures and get Prophet analysis using run_prophet master function\nfor k in range(0, params.shape[0]):\n #k=0\n # load parameters from param file\n folder = params.iloc[k][['folder']].to_string(index = False)\n file = params.iloc[k][['file']].to_string(index = False)\n start_w = 
params.iloc[k][['start_w']].to_string(index = False)\n end_w = params.iloc[k][['end_w']].to_string(index = False)\n save_folder = params.iloc[k][['save_folder']].to_string(index = False)\n test_data_points = int(params.iloc[k][['test_data_points']])\n forecast_period = int(params.iloc[k][['forecast_period']])\n \n # create folder for saving\n if not os.path.exists(path+save_folder):\n os.makedirs(path+save_folder)\n \n # run master function to perform prophet analysis and save the results\n try: run_prophet(path, folder, file, start_w, end_w, save_folder, test_data_points, forecast_period)\n except: error_list.append(str(k)+': ' + str(ValueError) )\n\n\n\n\n\n# copying files\nif not os.path.exists(path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/0_all'):\n os.makedirs(path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/0_all')\n\nfiles = os.listdir(path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/')\nfor file in files:\n if '5_model2' in file.lower():\n shutil.copy(path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/'+file, path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/0_all/'+file)\n if '1_original' in file.lower():\n shutil.copy(path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/'+file, path+save_folder.split(\"/\",1)[0]+'/1_pred_Prophet/0_all/'+file)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "egalimov/Modeling-the-demand-of-gastrointestinal-endoscopic-procedures-in-North-West-London", "sub_path": "step3_prophet_cycle.py", "file_name": "step3_prophet_cycle.py", "file_ext": "py", "file_size_in_byte": 6913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "pandas.set_option", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "fbprophet.Prophet", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_percentage_error", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 82, "usage_type": "call"}, {"api_name": "fbprophet.Prophet", "line_number": 87, "usage_type": "call"}, {"api_name": "fbprophet.diagnostics.cross_validation", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 92, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_percentage_error", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 157, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 159, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 162, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "13348666604", "text": "from builtins import range\nimport sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\n\ndef offset_bernoulli_cars():\n # Connect to a pre-existing cluster\n cars = h2o.upload_file(pyunit_utils.locate(\"smalldata/junit/cars_20mpg.csv\"))\n cars = cars[cars[\"economy_20mpg\"].isna() == 0]\n cars[\"economy_20mpg\"] = cars[\"economy_20mpg\"].asfactor()\n offset = h2o.H2OFrame([[.5]]*398)\n offset.set_names([\"x1\"])\n cars = cars.cbind(offset)\n\n gbm = H2OGradientBoostingEstimator(ntrees=1,\n max_depth=1,\n min_rows=1,\n learn_rate=1)\n gbm.train(x=list(range(2,8)),y=\"economy_20mpg\", training_frame=cars, offset_column=\"x1\")\n\n predictions = gbm.predict(cars)\n\n # Comparison result generated from 
R's gbm:\n #\tgg = gbm(formula = economy_20mpg~cylinders+displacement+power+weight+acceleration+year+offset(rep(.5,398)),\n # distribution = \"bernoulli\",data = df,n.trees = 1,interaction.depth = 1,n.minobsinnode = 1,shrinkage = 1,\n # train.fraction = 1,bag.fraction = 1)\n # pr = predict.gbm(object = gg,newdata = df,n.trees = 1,type = \"link\")\n # pr = 1/(1+exp(-df$x1 - pr))\n assert abs(-0.1041234 - gbm._model_json['output']['init_f']) < 1e-6, \"expected init_f to be {0}, but got {1}\". \\\n format(-0.1041234, gbm._model_json['output']['init_f'])\n assert abs(0.577326 - predictions[:,2].mean().getrow()[0]) < 1e-6, \"expected prediction mean to be {0}, but got {1}\". \\\n format(0.577326, predictions[:,2].mean().getrow()[0])\n assert abs(0.1621461 - predictions[:,2].min()) < 1e-6, \"expected prediction min to be {0}, but got {1}\". \\\n format(0.1621461, predictions[:,2].min())\n assert abs(0.8506528 - predictions[:,2].max()) < 1e-6, \"expected prediction max to be {0}, but got {1}\". \\\n format(0.8506528, predictions[:,2].max())\n\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(offset_bernoulli_cars)\nelse:\n offset_bernoulli_cars()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-py/tests/testdir_algos/gbm/pyunit_offset_bernoulli_cars_gbm.py", "file_name": "pyunit_offset_bernoulli_cars_gbm.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "h2o.upload_file", "line_number": 11, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 11, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 11, "usage_type": "name"}, {"api_name": "h2o.H2OFrame", "line_number": 14, "usage_type": "call"}, {"api_name": "h2o.estimators.gbm.H2OGradientBoostingEstimator", "line_number": 18, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 22, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 44, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "70226912195", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sqlite3\nfrom view.writter import Writter\n\n\nclass Database:\n\t\"\"\"database class to create an object for the sqlite file\"\"\"\n\n\n\n\tdef __init__(self , file='data/data.sqlite'):\n\t\tself.file = file\n\t\ttry:\n\t\t\tself.connection = sqlite3.connect(self.file)\n\t\t\tself.cursor = self.connection.cursor()\n\t\t\tself._init_tables()\n\t\texcept sqlite3.Error as e:\n\t\t\traise e\n\n\n\n\tdef __del__(self):\n\t\tself.cursor.close()\n\n\n\n\tdef _init_tables(self):\n\t\t\"\"\"check if tables exists and create them if not\"\"\"\n\t\ttry:\n\t\t\tself.cursor_execute(\"\"\" CREATE TABLE IF NOT EXISTS tasks( \n\t\t\t\t\t`id` INTEGER PRIMARY KEY,\n\t\t\t\t\t`node_id` INTEGER NOT NULL default 0,\n\t\t\t\t\t`name` TEXT NOT NULL default 'no name',\n\t\t\t\t\t`description` TEXT default '',\n\t\t\t\t\t`status` INTEGER DEFAULT 0\n\t\t\t\t\t) \"\"\" )\n\t\t\tself.cursor_execute(\"\"\" CREATE TABLE IF NOT EXISTS worktimes( \n\t\t\t\t\t`id` INTEGER PRIMARY KEY,\n\t\t\t\t\t`task_id` INTEGER NOT NULL,\n\t\t\t\t\t`begin` REAL NOT NULL,\n\t\t\t\t\t`end` REAL NOT NULL\n\t\t\t\t\t) \"\"\" )\n\t\texcept sqlite3.Error as e :\n\t\t\traise e\n\n\n\n\tdef 
cursor_execute(self, sql_query, data=None):\n\t\t\"\"\"an override function for `cursor.execute` to log each query\"\"\"\n\n\t\tWritter.sql_log(sql_query, data)\n\n\t\t# run SQL query normally\n\t\tif data:\n\t\t\treturn self.cursor.execute(sql_query, data)\n\t\telse:\n\t\t\treturn self.cursor.execute(sql_query)\n", "repo_name": "madeindjs/pomodore_manager", "sub_path": "classes/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 1333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 44, "usage_type": "attribute"}, {"api_name": "view.writter.Writter.sql_log", "line_number": 52, "usage_type": "call"}, {"api_name": "view.writter.Writter", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "5343708046", "text": "from sklearn import svm\r\nfrom sklearn.metrics import accuracy_score\r\nimport numpy as np\r\nfrom keras.datasets import mnist\r\nfrom skimage.transform import resize\r\n\r\n# Load the MNIST dataset\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\n# Define the threshold value\r\nthreshold_value = 127\r\n\r\n# Threshold the images\r\nx_train_thresholded = np.where(x_train > threshold_value, 1, 0)\r\nx_test_thresholded = np.where(x_test > threshold_value, 1, 0)\r\n\r\n### BOUNDING BOX\r\n\r\ndef construct_bounding_box(image):\r\n # Compute row-wise and column-wise sums of the thresholded image\r\n row_sums = np.sum(image, axis=1)\r\n col_sums = np.sum(image, axis=0)\r\n\r\n # Find the range of ink pixels along each row and column\r\n row_nonzero = np.nonzero(row_sums)[0]\r\n col_nonzero = np.nonzero(col_sums)[0]\r\n if len(row_nonzero) == 0 or len(col_nonzero) == 0:\r\n return np.zeros((20, 20))\r\n row_range = row_nonzero[[0, -1]]\r\n col_range = col_nonzero[[0, -1]]\r\n\r\n # Compute the center of the ink pixel ranges\r\n row_center = (row_range[0] + row_range[-1]) / 2\r\n col_center = (col_range[0] + col_range[-1]) / 2\r\n\r\n # Compute starting and ending indices for the bounding box\r\n row_start = int(np.clip(row_center - 9, 0, image.shape[0] - 20))\r\n row_end = row_start + 20\r\n col_start = int(np.clip(col_center - 9, 0, image.shape[1] - 20))\r\n col_end = col_start + 20\r\n\r\n # Extract the bounding box from the image\r\n bounding_box = image[row_start:row_end, col_start:col_end]\r\n\r\n return bounding_box\r\n\r\ndef construct_bounding_box_stretched(image):\r\n # Compute row-wise and column-wise sums of the thresholded image\r\n row_sums = np.sum(image, axis=1)\r\n col_sums = np.sum(image, axis=0)\r\n\r\n # Find the range of ink pixels along each row and column\r\n row_nonzero = np.nonzero(row_sums)[0]\r\n col_nonzero = np.nonzero(col_sums)[0]\r\n if len(row_nonzero) == 0 or len(col_nonzero) == 0:\r\n return np.zeros((20, 20))\r\n\r\n # Compute the horizontal and vertical ink pixel ranges\r\n row_range = row_nonzero[[0, -1]]\r\n col_range = col_nonzero[[0, -1]]\r\n row_start, row_end = row_range[0], row_range[-1]\r\n col_start, col_end = col_range[0], col_range[-1]\r\n\r\n # Stretch the extracted image to 20x20 dimensions\r\n image = image[row_start:row_end, col_start:col_end]\r\n image = resize(image, (20, 20))\r\n\r\n return image\r\n\r\n\r\nx_train_bounding_box = np.zeros((len(x_train_thresholded), 20, 20))\r\nx_train_bounding_box_stretched = 
np.zeros((len(x_train_thresholded), 20, 20))\r\nfor i in range(len(x_train_thresholded)):\r\n    x_train_bounding_box[i] = construct_bounding_box(x_train_thresholded[i])\r\n    x_train_bounding_box_stretched[i] = construct_bounding_box_stretched(x_train_thresholded[i])\r\n\r\nx_test_bounding_box = np.zeros((len(x_test_thresholded), 20, 20))\r\nx_test_bounding_box_stretched = np.zeros((len(x_test_thresholded), 20, 20))\r\nfor i in range(len(x_test_thresholded)):\r\n    x_test_bounding_box[i] = construct_bounding_box(x_test_thresholded[i])\r\n    x_test_bounding_box_stretched[i] = construct_bounding_box_stretched(x_test_thresholded[i])\r\n\r\n\r\n\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\n# Flatten the images\r\nx_train_thresholded_flat = x_train_thresholded.reshape(len(x_train_thresholded), -1)\r\nx_test_thresholded_flat = x_test_thresholded.reshape(len(x_test_thresholded), -1)\r\nx_train_bounding_box_flat = x_train_bounding_box.reshape(len(x_train_bounding_box), -1)\r\nx_test_bounding_box_flat = x_test_bounding_box.reshape(len(x_test_bounding_box), -1)\r\nx_train_bounding_box_stretched_flat = x_train_bounding_box_stretched.reshape(len(x_train_bounding_box_stretched), -1)\r\nx_test_bounding_box_stretched_flat = x_test_bounding_box_stretched.reshape(len(x_test_bounding_box_stretched), -1)\r\n\r\n# Train and evaluate on thresholded images\r\nnb = GaussianNB()\r\n\r\nnb.fit(x_train_thresholded_flat, y_train)\r\ny_pred = nb.predict(x_test_thresholded_flat)\r\nacc_thresholded = accuracy_score(y_test, y_pred)\r\nprint(\"Accuracy on thresholded images:\", acc_thresholded)\r\n\r\n# Train and evaluate on bounding box images\r\nnb.fit(x_train_bounding_box_flat, y_train)\r\ny_pred = nb.predict(x_test_bounding_box_flat)\r\nacc_bounding_box = accuracy_score(y_test, y_pred)\r\nprint(\"Accuracy on bounding box images:\", acc_bounding_box)\r\n\r\n# Train and evaluate on stretched bounding box\r\nnb.fit(x_train_bounding_box_stretched_flat, y_train)\r\ny_pred = nb.predict(x_test_bounding_box_stretched_flat)\r\nacc_bounding_box_stretched = accuracy_score(y_test, y_pred)\r\nprint(\"Accuracy for Bounding Box stretched Image:\", acc_bounding_box_stretched)\r\n\r\n### accuracy for train datapoints\r\n\r\nnb.fit(x_train_thresholded_flat, y_train)\r\ny_pred_train = nb.predict(x_train_thresholded_flat)\r\naccuracy_train = accuracy_score(y_train, y_pred_train)\r\nprint(\"Accuracy of thresholded image for trained data:\", accuracy_train)\r\n\r\n# for bounding box stretched image\r\nnb.fit(x_train_bounding_box_stretched_flat, y_train)\r\ny_pred_train = nb.predict(x_train_bounding_box_stretched_flat)\r\naccuracy_train = accuracy_score(y_train, y_pred_train)\r\nprint(\"Accuracy of bounding box stretched image for trained data:\", accuracy_train)\r\n\r\n# for bounding box image\r\nnb.fit(x_train_bounding_box_flat, y_train)\r\ny_pred_train = nb.predict(x_train_bounding_box_flat)\r\naccuracy_train = accuracy_score(y_train, y_pred_train)\r\nprint(\"Accuracy of bounding box image for trained data:\", accuracy_train)\r\n\r\n\r\n", "repo_name": "Ankitx21/Machine-learning-MNSIT-Dataset-", "sub_path": "naives_bayes.py", "file_name": "naives_bayes.py", "file_ext": "py", "file_size_in_byte": 5402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 14, "usage_type": "call"}, 
{"api_name": "numpy.where", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 96, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "28988851154", "text": "import shutil\nimport tempfile\nimport time\n\nimport pytest\nfrom pathlib import Path\nfrom itertools import chain\n\nfrom django.core.management import call_command\nfrom rest_framework import serializers\n\nfrom django_rest_tsg.build import (\n TypeScriptBuilder,\n TypeScriptBuilderConfig,\n build,\n get_relative_path,\n get_digest,\n)\nfrom tests.serializers import PathSerializer, PathWrapperSerializer\nfrom tests.test_dataclass import USER_INTERFACE\nfrom tests.tsgconfig import BUILD_TASKS\nfrom tests.test_serializer import PATH_INTERFACE, DEPARTMENT_INTERFACE\nfrom tests.test_enum import PERMISSION_FLAG_ENUM\n\n\nFOOBAR_CHILD_INTERFACE = \"\"\"import { FoobarParent } from './foobar-parent';\n\nexport interface FoobarChild {\n id: number;\n parent: Parent;\n parents: Parent[];\n text: string;\n intNumber: number;\n uuid: string;\n url: string;\n description: string;\n config: any;\n time: string;\n slug: string;\n ipAddress: string;\n email: string;\n boolValue: boolean;\n floatNumber: number;\n}\"\"\"\n\nPATH_WRAPPER_INTERFACE = \"\"\"import { Path } from '../path';\n\nexport interface PathWrapper {\n path: Path;\n meta: any;\n}\"\"\"\n\nDEPARTMENT_INTERFACE = (\n \"\"\"import { User } from './user';\n\n\"\"\"\n + DEPARTMENT_INTERFACE\n)\n\nPATH_V2_INTERFACE = \"\"\"export interface Path {\n name: string;\n suffix: string;\n suffixes: string[];\n stem: string;\n isDirectory: boolean;\n size: number;\n metadata: any;\n}\"\"\"\n\n\n@pytest.fixture()\ndef another_build_dir():\n d = tempfile.TemporaryDirectory(prefix=\"django-rest-tsg\")\n path = Path(d.name)\n subdir = path / \"sub\"\n 
subdir.mkdir(exist_ok=True)\n yield path\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef skip_lines(content: str, lines: int = 6):\n return \"\\n\".join(content.splitlines()[lines:])\n\n\ndef test_get_relative_path():\n path = Path(\"/var/tmp/django-rest-tsg/foo/bar.ts\")\n dependency_path = Path(\"/var/tmp/cache/django-rest-tsg/bar/foo.ts\")\n assert (\n get_relative_path(path, dependency_path)\n == \"../../cache/django-rest-tsg/bar/foo.ts\"\n )\n path = Path(\"/var/tmp/django-rest-tsg/foo.ts\")\n dependency_path = Path(\"/var/tmp/django-rest-tsg/foo/bar/foobar.ts\")\n assert get_relative_path(path, dependency_path) == \"./foo/bar/foobar.ts\"\n path = Path(\"/var/tmp/django-rest-tsg/foo.ts\")\n dependency_path = Path(\"/var/tmp/django-rest-tsg/bar.ts\")\n assert get_relative_path(path, dependency_path) == \"./bar.ts\"\n\n\ndef test_builder(tmp_path: Path, another_build_dir: Path):\n sub_dir = another_build_dir / \"sub\"\n tasks = BUILD_TASKS[1:]\n tasks.insert(0, build(PathSerializer, options={\"build_dir\": another_build_dir}))\n tasks.append(build(PathWrapperSerializer, options={\"build_dir\": sub_dir}))\n config = TypeScriptBuilderConfig(build_dir=tmp_path, tasks=tasks)\n builder = TypeScriptBuilder(config)\n builder.build_all()\n tmp_files = {\n file.name: file.read_text()\n for file in chain(\n tmp_path.iterdir(),\n iter(f for f in another_build_dir.iterdir() if f.is_file()),\n sub_dir.iterdir(),\n )\n }\n assert len(tmp_files) == len(tasks)\n assert \"path.ts\" in tmp_files\n assert skip_lines(tmp_files[\"path.ts\"]) == PATH_INTERFACE\n assert \"foobar-child.ts\" in tmp_files\n assert skip_lines(tmp_files[\"foobar-child.ts\"]) == FOOBAR_CHILD_INTERFACE\n assert \"permission-flag.enum.ts\" in tmp_files\n assert skip_lines(tmp_files[\"permission-flag.enum.ts\"], 6) == PERMISSION_FLAG_ENUM\n assert \"user.ts\" in tmp_files\n assert skip_lines(tmp_files[\"user.ts\"], 8) == USER_INTERFACE\n assert \"path-wrapper.ts\" in tmp_files\n assert skip_lines(tmp_files[\"path-wrapper.ts\"]) == PATH_WRAPPER_INTERFACE\n assert \"department.ts\" in tmp_files\n assert skip_lines(tmp_files[\"department.ts\"]) == DEPARTMENT_INTERFACE\n\n\ndef test_command(tmp_path: Path):\n call_command(\"buildtypescript\", \"tests\", \"--build-dir\", str(tmp_path))\n tmp_files = {\n file.name: file.read_text()\n for file in chain(tmp_path.iterdir(), tmp_path.iterdir())\n }\n assert len(tmp_files) == len(BUILD_TASKS)\n assert \"path.ts\" in tmp_files\n assert \"foobar-child.ts\" in tmp_files\n assert \"permission-flag.enum.ts\" in tmp_files\n\n\ndef test_content_change(tmp_path: Path):\n tasks = [build(PathSerializer)]\n config = TypeScriptBuilderConfig(build_dir=tmp_path, tasks=tasks)\n builder = TypeScriptBuilder(config)\n builder.build_all()\n build_file = tmp_path / \"path.ts\"\n digest = get_digest(build_file)\n content = build_file.read_text()\n last_modified_on = build_file.stat().st_mtime\n # no change\n builder.build_all()\n same_digest = get_digest(build_file)\n same_content = build_file.read_text()\n same_last_modified_on = build_file.stat().st_mtime\n assert digest == same_digest\n assert content == same_content\n assert last_modified_on == same_last_modified_on\n # add field\n class PathVersion2Serializer(PathSerializer):\n metadata = serializers.JSONField()\n\n tasks = [build(PathVersion2Serializer, {\"alias\": \"Path\"})]\n config = TypeScriptBuilderConfig(build_dir=tmp_path, tasks=tasks)\n builder = TypeScriptBuilder(config)\n time.sleep(0.1)\n builder.build_all()\n digest_v2 = 
get_digest(build_file)\n last_modified_on_v2 = build_file.stat().st_mtime\n\n assert skip_lines(build_file.read_text()) == PATH_V2_INTERFACE\n assert digest != digest_v2\n assert last_modified_on_v2 > last_modified_on\n", "repo_name": "jinkanhq/django-rest-tsg", "sub_path": "tests/test_build.py", "file_name": "test_build.py", "file_ext": "py", "file_size_in_byte": 5451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tests.test_serializer.DEPARTMENT_INTERFACE", "line_number": 53, "usage_type": "name"}, {"api_name": "tests.test_serializer.DEPARTMENT_INTERFACE", "line_number": 57, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 73, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 74, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 71, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_relative_path", "line_number": 89, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 92, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 93, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_relative_path", "line_number": 94, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 95, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_relative_path", "line_number": 97, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 100, "usage_type": "name"}, {"api_name": "tests.tsgconfig.BUILD_TASKS", "line_number": 102, "usage_type": "name"}, {"api_name": "django_rest_tsg.build.build", "line_number": 103, "usage_type": "call"}, {"api_name": "tests.serializers.PathSerializer", "line_number": 103, "usage_type": "argument"}, {"api_name": "django_rest_tsg.build.build", "line_number": 104, "usage_type": "call"}, {"api_name": "tests.serializers.PathWrapperSerializer", "line_number": 104, "usage_type": "argument"}, {"api_name": "django_rest_tsg.build.TypeScriptBuilderConfig", "line_number": 105, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.TypeScriptBuilder", "line_number": 106, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 110, "usage_type": "call"}, {"api_name": "tests.test_serializer.PATH_INTERFACE", "line_number": 118, "usage_type": "name"}, {"api_name": "tests.test_enum.PERMISSION_FLAG_ENUM", "line_number": 122, "usage_type": "name"}, {"api_name": "tests.test_dataclass.USER_INTERFACE", "line_number": 124, "usage_type": "name"}, {"api_name": "tests.test_serializer.DEPARTMENT_INTERFACE", "line_number": 128, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 131, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 132, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 135, "usage_type": "call"}, {"api_name": "tests.tsgconfig.BUILD_TASKS", "line_number": 137, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 143, "usage_type": "name"}, {"api_name": "django_rest_tsg.build.build", "line_number": 144, "usage_type": "call"}, {"api_name": "tests.serializers.PathSerializer", "line_number": 144, "usage_type": "argument"}, {"api_name": 
"django_rest_tsg.build.TypeScriptBuilderConfig", "line_number": 145, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.TypeScriptBuilder", "line_number": 146, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_digest", "line_number": 149, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_digest", "line_number": 154, "usage_type": "call"}, {"api_name": "tests.serializers.PathSerializer", "line_number": 161, "usage_type": "name"}, {"api_name": "rest_framework.serializers.JSONField", "line_number": 162, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 162, "usage_type": "name"}, {"api_name": "django_rest_tsg.build.build", "line_number": 164, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.TypeScriptBuilderConfig", "line_number": 165, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.TypeScriptBuilder", "line_number": 166, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 167, "usage_type": "call"}, {"api_name": "django_rest_tsg.build.get_digest", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "23744308858", "text": "import time\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import permutations\n\n\ndef load_permutations(filename=\"preferences.csv\"):\n \"\"\"\n Load all student permutations from a file\n\n Parameters\n ----------\n filename: string\n Path to a file\n \n Returns\n -------\n stars: A list of stars in alphabetical order\n raters: dictionary( \n string (Ranker's name): list (This person's permutation as a list of numbers\n corresponding to the indices in stars)\n )\n \"\"\"\n raters = {}\n fin = open(filename)\n lines = fin.readlines()\n fin.close()\n stars = [s.rstrip().replace(\"\\\"\", \"\") for s in lines[0].split(\",\")[1::]]\n for line in lines[1::]:\n fields = line.split(\",\")\n rater = fields[0].replace(\"\\\"\", \"\")\n fields = [int(f) for f in fields[1::]]\n raters[rater] = [0]*len(fields)\n for i, x in enumerate(fields):\n raters[rater][x-1] = i\n return stars, raters\n\n\ndef mds(D):\n \"\"\"\n Perform classic multidimensional scaling\n See notes here:\n http://www.cs.umd.edu/~djacobs/CMSC828/MDSexplain.pdf\n\n Parameters\n ----------\n D: ndarray(N, N)\n A matrix of pairwise similarities\n \n Return\n ------\n Y: ndarray(N, N)\n MDS projection, with columns in order of variance\n explained\n \"\"\"\n from numpy import linalg\n N = D.shape[0]\n H = np.eye(N) - np.ones((N, N))/N\n B = -0.5*(H.dot((D*D).dot(H)))\n U, s, V = linalg.svd(B)\n Y = np.sqrt(s[None, :])*U\n return Y\n\ndef plot_mds_distances(raters, random_state=0):\n \"\"\"\n Compute all pairwise Kendall-Tau distances and plot a dimension \n reduction from the Kendall-Tau metric space to 2D to visualize how\n similar different raters are\n\n Parameters\n ----------\n raters: dictionary \n string (Ranker's name): list (This person's permutation as a list of numbers\n corresponding to the indices in animals)\n random_state: int\n A seed to determine which random isometry to use for MDS\n \"\"\"\n N = len(raters)\n D = np.zeros((N, N))\n rlist = [r for r in raters]\n for i, rater1 in enumerate(rlist):\n for j in range(i+1, N):\n rater2 = rlist[j]\n D[i, j] = kendall_tau(raters[rater1], raters[rater2])\n D = D+D.T\n X = mds(D)\n plt.scatter(X[:, 0], X[:, 1])\n for i, r in enumerate(rlist):\n plt.text(X[i, 0], X[i, 1], r)\n plt.title(\"MDS Projected Kendall-Tau Distances\")\n\n\ndef kendall_tau(p1, p2):\n \"\"\"\n An O(N^2) algorithm for computing the Kendall-Tau 
Distance\n\n    Parameters\n    ----------\n    p1: List of N elements\n        A permutation of the elements 0, 1, 2, ..., N-1 corresponding \n        to the first rating\n    p2: List of N elements\n        A permutation of the elements 0, 1, 2, ..., N-1 corresponding to \n        the second rating\n    \n    Returns\n    -------\n    The Kendall-Tau distance between permutation p1 and p2\n    \"\"\"\n    n = len(p1)\n    discordant = 0\n    indices1 = []\n    indices2 = []\n\n    # pre-processing\n    for i in range(n):\n        indices1.append(p1.index(i))\n        indices2.append(p2.index(i))\n\n    for i in range(0, n):\n        \n        for j in range(i+1, n):\n\n            if (indices1[i] > indices1[j] and indices2[i] > indices2[j]) or (indices1[i] < indices1[j] and indices2[i] < indices2[j]):\n                pass\n            else:\n                discordant += 1\n    return discordant\n\n\n## Helper functions below\ndef get_diameter(rankings):\n    p1 = \"\"\n    p2 = \"\"\n    maxdist = 0\n    keys = list(rankings.keys())\n    lenk = len(keys)\n    for i in range(lenk):\n        for j in range(i+1, lenk):\n            kt = kendall_tau(rankings[keys[i]], rankings[keys[j]])\n            if kt > maxdist:\n                maxdist = kt\n                p1 = keys[i]\n                p2 = keys[j]\n\n    return p1, rankings[p1], p2, rankings[p2], maxdist\n\ndef get_average_ranking(stars, raters):\n    sums = {}#np.zeros(len(stars))\n    keys = list(raters.keys())\n    for star in stars:\n        sums[star] = 0\n\n\n    for key in keys:\n        for i in range(len(stars)):\n            sums[stars[i]] += raters[key].index(i) + 1\n    \n    for i in range(len(sums)):\n        sums[stars[i]] /= len(keys)\n    \n    res = dict(sorted(sums.items(), key=lambda x:x[1]))\n    print(list(res.keys()))\n\ndef kendall_tau_fast(p1, p2):\n    indices = []\n    for i in range(len(p1)):\n        indices.append(p1.index(i))\n\n    p2 = list(p2)  # work on a copy so the caller's ranking is not mutated\n    for i in range(len(p2)):\n        p2[i] = indices[p2[i]]\n\n    y = [0]*len(p2)\n    return mergesort_rec(p2, y, 0, len(p2)-1)\n\ndef kemeny(raters):\n    stars = [0,1,2,3,4,5,6,7]\n    perms = list(permutations(stars))\n    \n    keys = raters.keys()\n    \n    best = []\n    \n    m = 10000000\n    \n    for perm in perms:\n        d = 0\n        for key in keys:\n            d += kendall_tau_fast(list(perm), raters[key])\n        if d < m:\n            m = d\n            best = perm\n    \n    return best \n    \n    \ndef merge(x, y, i1, mid, i2):\n    \"\"\"\n    Perform a merge of two contiguous sorted sub-chunks of\n    the array x, using y as a staging area\n\n    Parameters\n    ----------\n    x: list\n        The main array\n    y: list\n        The array to copy into as the two chunks are being merged\n    i1: int\n        Left of first chunk\n    mid: int\n        Right of first chunk\n    i2: int\n        End of second chunk\n    \"\"\"\n    cursorL = i1\n    cursorR = mid + 1\n    idx = i1\n    swaps = 0\n\n    while cursorL <= mid and cursorR <= i2:\n        if x[cursorL] <= x[cursorR]:\n            y[idx] = x[cursorL]\n            idx += 1\n            cursorL += 1\n        elif x[cursorR] < x[cursorL]:\n            y[idx] = x[cursorR]\n            idx += 1\n            cursorR += 1\n            swaps += mid - cursorL + 1\n\n    while cursorL <= mid:\n        y[idx] = x[cursorL]\n        idx += 1\n        cursorL += 1\n\n    while cursorR <= i2:\n        y[idx] = x[cursorR]\n        idx += 1\n        cursorR += 1\n    \n    for i in range(i1, i2 + 1):\n        x[i] = y[i]\n    \n    return swaps\n\ndef mergesort_rec(x, y, i1, i2):\n    \"\"\"\n    A recursive call to sort a subset of the array\n\n    Parameters\n    ----------\n    x: list\n        Array to sort\n    y: list\n        A temporary array / staging area to store intermediate results\n    i1: int\n        First index of chunk to sort, inclusive\n    i2: int\n        Second index of chunk to sort, inclusive (i2 >= i1)\n    \"\"\"\n    ret = 0\n    if i1 < i2:\n        mid = i1 + (i2 - i1) // 2\n        ## sort both halves recursively, then merge and count swaps\n        left = mergesort_rec(x, y, i1, mid)\n        right = mergesort_rec(x, y, mid + 1, i2)\n        merge_result = merge(x, y, i1, mid, i2) \n        ret = left + right + 
merge_result\n    return ret \n\ndef compare_cover_songs(Songs):\n    g = np.zeros((32, 32))\n    for i in range(len(Songs) - 1):\n        for j in range(i + 1, len(Songs)):\n            d = kendall_tau_fast(Songs[i][\"rankings\"], Songs[j][\"rankings\"])\n            g[i][j] = d \n            g[j][i] = d \n    return g\n\n\n\ndef plot_tune_similarities(tunes, D):\n    \"\"\"\n    Parameters\n    ----------\n    tunes: list of N dictionary items\n        Tunes loaded in from the JSON file\n    D: ndarray(N, N)\n        An NxN matrix with the Kendall-Tau distances between\n        all pairs of tunes\n    \"\"\"\n    pix = np.arange(len(tunes))\n    J, I = np.meshgrid(pix, pix)\n    J = J.flatten()\n    I = I.flatten()\n    sz = np.max(D)-D\n    sz /= np.max(sz)\n    sz = sz**1.5\n    sz = 40*sz/np.max(sz)\n    \n    plt.figure(figsize=(12, 10))\n    plt.scatter(J, I, s=sz, c=D, cmap='magma')\n    plt.gca().invert_yaxis()\n    plt.xticks(np.arange(D.shape[0]), [t[\"name\"] + \" \" + t[\"version\"] for t in tunes], rotation=90)\n    plt.yticks(np.arange(D.shape[0]), [t[\"name\"] + \" \" + t[\"version\"] for t in tunes])\n    plt.colorbar()\n    plt.gca().set_facecolor((0.9, 0.9, 0.9))", "repo_name": "Duntron1000/CS271", "sub_path": "HW4_FairElections-main/ranking.py", "file_name": "ranking.py", "file_ext": "py", "file_size_in_byte": 7942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.eye", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "itertools.permutations", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 307, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 309, "usage_type": "name"}]} +{"seq_id": "30587851771", "text": "import xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element\nimport sys\n\ndef main(argv):\n\t\n\ttree = ET.parse(argv[1])\n\troot = tree.getroot()\n\tfor POINT in root.findall('POINT'):\n\t\tbottomhz=POINT.find(\"BOTTOM_HZ\").text\n\t\ttophz=POINT.find(\"TOP_HZ\").text\n\t\tf0end=POINT.find(\"F0_END\").text\n\t\tf0start=POINT.find(\"F0_START\").text\n\t\tBOTTOM_HZ=float(bottomhz)\n\t\tTOP_HZ=float(tophz)\n\t\tF0_END=float(f0end)\n\t\tF0_START=float(f0start)\n\t\tif F0_END < BOTTOM_HZ or F0_END >TOP_HZ or F0_START< BOTTOM_HZ or F0_START >TOP_HZ:\n\t\t\troot.remove(POINT)\n\ttree.write(argv[2])\nmain(sys.argv)\n", "repo_name": "josinerawee/gevpro-week3", "sub_path": "spontal_filter.py", "file_name": "spontal_filter.py", "file_ext": "py", "file_size_in_byte": 581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 7, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "27164837411", "text": "# pylint: disable=missing-module-docstring (C0114)\n# pylint: disable=missing-function-docstring (C0116)\nimport pytest\n\nfrom core import FDM, HM, RGB, Params\nfrom matching import operation_context_builder\nfrom matching.operations import FeatureDistributionMatching, HistogramMatching\nfrom utils.cs_conversion.cs_converter import ColorSpaceConverter\n\n\ndef test_operation_context_builder() -> None:\n params = Params({'color_space': RGB,\n 'channels': '0,1,2',\n 'match_proportion': 1.0,\n 'verify_input': True})\n op_ctx = operation_context_builder.build_operation_context(HM, params)\n assert isinstance(op_ctx.operation, HistogramMatching)\n assert isinstance(op_ctx.converter, ColorSpaceConverter)\n\n op_ctx = operation_context_builder.build_operation_context(FDM, params)\n assert isinstance(op_ctx.operation, FeatureDistributionMatching)\n assert isinstance(op_ctx.converter, ColorSpaceConverter)\n\n with pytest.raises(ValueError):\n operation_context_builder.build_operation_context('1337', params)\n", "repo_name": "continental/image-statistics-matching", "sub_path": "tests/matching/test_operation_context_builder.py", "file_name": "test_operation_context_builder.py", "file_ext": "py", "file_size_in_byte": 1082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "61", "api": [{"api_name": "core.Params", "line_number": 12, "usage_type": "call"}, {"api_name": "core.RGB", "line_number": 12, "usage_type": "name"}, {"api_name": "matching.operation_context_builder.build_operation_context", "line_number": 16, "usage_type": "call"}, {"api_name": "core.HM", "line_number": 16, "usage_type": "argument"}, {"api_name": "matching.operation_context_builder", "line_number": 16, "usage_type": "name"}, {"api_name": "matching.operations.HistogramMatching", "line_number": 17, "usage_type": "argument"}, 
{"api_name": "utils.cs_conversion.cs_converter.ColorSpaceConverter", "line_number": 18, "usage_type": "argument"}, {"api_name": "matching.operation_context_builder.build_operation_context", "line_number": 20, "usage_type": "call"}, {"api_name": "core.FDM", "line_number": 20, "usage_type": "argument"}, {"api_name": "matching.operation_context_builder", "line_number": 20, "usage_type": "name"}, {"api_name": "matching.operations.FeatureDistributionMatching", "line_number": 21, "usage_type": "argument"}, {"api_name": "utils.cs_conversion.cs_converter.ColorSpaceConverter", "line_number": 22, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 24, "usage_type": "call"}, {"api_name": "matching.operation_context_builder.build_operation_context", "line_number": 25, "usage_type": "call"}, {"api_name": "matching.operation_context_builder", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "10341510154", "text": "import sys, os, threading, traceback, signal\nfrom datetime import datetime\nfrom subprocess import call, Popen, PIPE, STDOUT\n\nlardm_dir = os.path.dirname(os.path.realpath(__file__))\n# Get configuration for system-dependent paths, etc.\n\nexecfile(\"/etc/lardm.conf\")\n\n#import gettext\n#lang = os.getenv(\"LANG\")\n#if lang:\n# gettext.install('lardm', 'i18n', unicode=1)\n\nfrom socket import socket, AF_UNIX, SOCK_STREAM\n\nclass Lardmd:\n def __init__(self):\n if os.path.isfile(logfile):\n call([\"mv\", logfile, logfile + \".1\"])\n # Create an unbound and not-connected socket.\n self.sock = socket(AF_UNIX, SOCK_STREAM)\n # Bind the socket to 'lardmd' in the abstract namespace.\n # Note the null-byte.\n self.sock.bind(\"\\0lardmd\")\n # Create a backlog queue for up to 1 connection.\n self.sock.listen(1)\n self.log(\"Started\\n\")\n self.new_session(autouser)\n\n\n def wait(self):\n try:\n conn = None\n # Blocks until a connection arrives:\n conn = self.sock.accept()[0]\n # A tuple (connected_socket, None) is returned upon connection.\n # Note we get the first argument and throw away the rest.\n self.log(\"Connection accepted\\n\")\n # Say hi\n conn.send(\"ok\\n\")\n\n # Wait for response\n msg = conn.recv(64)\n if msg and (msg.strip() == \"NEW\"):\n if self.new_session():\n conn.send(\"done\\n\")\n\n except:\n self.log(\"\".join(traceback.format_exc()))\n\n if conn:\n # Close the connection.\n # This will unblock the other peer in case it's waiting for\n # another message.\n self.log(\"Closing connection\\n\")\n conn.close()\n\n\n def new_session(self, user=\"\"):\n # Determine which display to use\n # Look for a free display/tty pair\n display = None\n for dn, tty in lardmd_displays:\n p = Popen([\"/usr/bin/xdpyinfo\", \"-display\", dn],\n stdout=PIPE, stderr=STDOUT)\n p.communicate()\n if p.returncode == 1:\n p = Popen([\"/usr/bin/pgrep\", \"-t\", tty],\n stdout=PIPE, stderr=STDOUT)\n p.communicate()\n if p.returncode == 1:\n display = dn\n break\n if not display:\n self.log(\"*** Couldn't open display\\n\")\n return False\n t = threading.Thread(target=self.start_session,\n args=(display, tty, user))\n t.start()\n return True\n\n\n def start_session(self, display, tty, user):\n # Start new session\n process = Popen([\"/usr/bin/xinit\", \"/bin/bash\", \"--login\", \"-c\",\n \"%s/lardm.py %s_%s\" % (lardm_dir, display, user),\n \"--\", \"/usr/bin/X\", display, \"vt\" + tty[3:], \"-nolisten\", \"tcp\"],\n stdout=PIPE, stderr=STDOUT)\n displays.append(tty)\n while True:\n line = process.stdout.readline()\n if not line:\n break\n self.log(display + \"]\" + line)\n 
displays.remove(tty)\n if displays:\n call([\"/usr/bin/chvt\", displays[-1][3:]])\n else:\n self.new_session()\n\n\n def log(self, line):\n fh = open(logfile, \"a\")\n ts = datetime.now().strftime(\"%Y-%m-%d/%H:%M:%S[\")\n fh.write(ts + line)\n fh.close()\n\n\ndef tidy(type, value, tb):\n lardmd.log(\"Trap:\\n\" +\n \"\".join(traceback.format_exception(type, value, tb)))\n end()\n\ndef end(*args):\n lardmd.log(\"Killing displays\\n\")\n for tty in displays:\n call([\"pkill\", \"-t\", tty])\n lardmd.log(\"Exiting\\n\")\n os._exit(1)\n\nsys.excepthook = tidy\nsignal.signal(signal.SIGTERM, end)\n#signal.signal(signal.SIGKILL, end) - doesn't work\n\n\nif __name__ == \"__main__\":\n displays = []\n lardmd = Lardmd()\n fh = open(\"/var/run/lardmd.pid\", \"w\")\n fh.write(\"%d\\n\" % os.getpid())\n fh.close()\n while True:\n lardmd.wait()\n\n\n", "repo_name": "BackupTheBerlios/larch", "sub_path": "larch7/abs/lardm/lardmd.py", "file_name": "lardmd.py", "file_ext": "py", "file_size_in_byte": 4077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 20, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 22, "usage_type": "call"}, {"api_name": "socket.AF_UNIX", "line_number": 22, "usage_type": "argument"}, {"api_name": "socket.SOCK_STREAM", "line_number": 22, "usage_type": "argument"}, {"api_name": "traceback.format_exc", "line_number": 50, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 65, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 66, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 66, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 69, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 70, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 70, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 78, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 86, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 89, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 89, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "name"}, {"api_name": "traceback.format_exception", "line_number": 112, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 118, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 122, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 123, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "32174873763", "text": "from datetime import datetime\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport torch\nimport 
glob\nimport librosa\n\ndef create_date_folder(checkpoints_path, name):\n    if not os.path.exists(checkpoints_path):\n        os.mkdir(checkpoints_path)\n    date = datetime.now()\n    day = date.strftime('%d-%m-%Y_')\n    path = f'{checkpoints_path}{day}{str(date.hour)}_{name}'\n    print(path)\n    if not os.path.exists(path):\n        os.mkdir(path)\n    return path\n\n#get the number of classes from the number of folders in the audio dir\ndef get_n_classes(audio_path):\n    root, dirs, files = next(os.walk(audio_path))\n    n_classes = len(dirs)\n    print(f'Found {n_classes} different classes in {audio_path}')\n    return n_classes\n\ndef min_max_denormalize(normalized_data, original_min, original_max, feature_range=(-1, 1)):\n    min_val, max_val = feature_range\n    X_std = (normalized_data - min_val) / (max_val - min_val)\n    X_denorm = X_std * (original_max - original_min) + original_min\n    return X_denorm\n\ndef torch_denormalize(normalized_data, original_min, original_max, feature_range=(-1, 1)):\n    min_val, max_val = feature_range\n    normalized_data = torch.as_tensor(normalized_data).to(original_min.device)\n    X_std = (normalized_data - min_val) / (max_val - min_val)\n    X_denorm = X_std * (original_max - original_min) + original_min\n    return X_denorm\n\n\nclass CustomDataset(Dataset):\n    def __init__(self, preprocessed_dir):\n        # Load preprocessed data from numpy files\n        self.melspecs = np.load(os.path.join(preprocessed_dir, \"melspecs.npy\"))\n        self.loudness = np.load(os.path.join(preprocessed_dir, \"loudness.npy\"))\n        self.labels = np.load(os.path.join(preprocessed_dir, \"labels.npy\"))\n        self.signals = np.load(os.path.join(preprocessed_dir, \"signals.npy\"))\n\n    def __len__(self):\n        return len(self.melspecs)\n\n    def __getitem__(self, idx):\n        melspec = torch.tensor(self.melspecs[idx], dtype=torch.float32)\n        loudness = torch.tensor(self.loudness[idx], dtype=torch.float32)\n        label = torch.tensor(self.labels[idx], dtype=torch.long)\n        signals = torch.tensor(self.signals[idx], dtype=torch.long)\n        return melspec, loudness, label, signals\n    \ndef get_dataloader(preprocessed_dir, batch_size=64, shuffle=True, num_workers=0):\n    dataset = CustomDataset(preprocessed_dir)\n    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=True)\n    return dataloader\n\n\n# Vocoder setup\ndef load_checkpoint(filepath, device):\n    assert os.path.isfile(filepath)\n    checkpoint_dict = torch.load(filepath, map_location=device)\n    return checkpoint_dict\n\ndef scan_checkpoint(cp_dir, prefix):\n    pattern = os.path.join(cp_dir, prefix + '*')\n    cp_list = glob.glob(pattern)\n    if len(cp_list) == 0:\n        return ''\n    return sorted(cp_list)[-1]\n\ndef inference(melspec, MAX_WAV_VALUE, Vocoder, h, device):\n    generator = Vocoder(h).to(device)\n\n    state_dict_g = load_checkpoint('./hifigan/hifigan_vocoder.ckpt', device)\n    generator.load_state_dict(state_dict_g['generator'])\n\n    generator.eval()\n    generator.remove_weight_norm()\n\n    with torch.no_grad():\n        melspec = torch.FloatTensor(melspec).to(device)\n        y_g_hat = generator(melspec)\n        audio = y_g_hat.squeeze()\n        audio = audio * MAX_WAV_VALUE\n        audio = audio.cpu().numpy().astype('int16')\n    return audio\n\ndef inverse_mel_spectrogram(mel_spec, sr, n_fft, n_mels, fmin, fmax):\n    # Invert mel scale to linear scale\n    mel_basis_inv = np.linalg.pinv(librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax))\n    return np.dot(mel_basis_inv, mel_spec)\n\ndef approximate_loudness_from_melspec(mel_spec, sr, n_fft, fmin, fmax, n_mels):\n    print(mel_spec.min(), mel_spec.max())\n    
max_val = np.load('preprocessed/max_val.npy')\n    min_val = np.load('preprocessed/min_val.npy')\n    mel_spec = min_max_denormalize(mel_spec, min_val, max_val)\n    print(mel_spec.min(), mel_spec.max())\n    # Inverse mel-spectrogram to STFT\n    spec_approx = inverse_mel_spectrogram(mel_spec, sr, n_fft, n_mels, fmin, fmax)\n\n    # Convert to power spectrum and apply logarithmic scaling\n    spec_approx_log = np.log(np.abs(spec_approx)**2 + 1e-7)\n\n    # Apply A-weighting\n    f = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n    a_weight = librosa.A_weighting(f)\n    spec_weighted = spec_approx_log + a_weight.reshape(-1, 1)\n\n    # Average across frequency bands\n    loudness_approx = np.mean(spec_weighted, axis=0)\n\n    return loudness_approx\n\ndef approximate_loudness_from_batch_melspec(batch_mel_spec, sr, n_fft, fmin, fmax, n_mels):\n    max_val = torch.from_numpy(np.load('preprocessed/max_val.npy')).to(batch_mel_spec.device)\n    min_val = torch.from_numpy(np.load('preprocessed/min_val.npy')).to(batch_mel_spec.device)\n    batch_mel_spec = torch_denormalize(batch_mel_spec, min_val, max_val)\n    batch_size = batch_mel_spec.size(0)\n    loudness_batch = []\n\n    # Compute the inverted mel basis\n    mel_basis_inv = np.linalg.pinv(librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax))\n    mel_basis_inv = torch.from_numpy(mel_basis_inv).float().to(batch_mel_spec.device)\n\n    for i in range(batch_size):\n        # Reshape mel_spec for each item in the batch to 2D: [frequency_bins, time_steps]\n        mel_spec = batch_mel_spec[i].squeeze(0) # Remove the channel dimension, resulting in [400, 64]\n        \n        # Transpose to get [frequency_bins, time_steps]\n        mel_spec = mel_spec.transpose(0, 1) # Now shape [64, 400]\n\n        # Inverse mel-spectrogram (approximate)\n        spec_approx = torch.mm(mel_basis_inv, mel_spec)\n        \n        # Convert to power spectrum and apply logarithmic scaling\n        spec_approx_log = torch.log(spec_approx.pow(2) + 1e-7)\n\n        # Apply A-weighting\n        f = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n        a_weight = librosa.A_weighting(f)\n        spec_weighted = spec_approx_log + torch.tensor(a_weight.reshape(-1, 1)).to(spec_approx_log.device)\n\n        # Average across frequency bands to approximate loudness\n        loudness_approx = torch.mean(spec_weighted, axis=0)\n        loudness_batch.append(loudness_approx)\n    return torch.stack(loudness_batch).view(batch_size, 400, 1).float()\n\n# def mel_to_wav(melspec):\n#     # Generate melspectrograms\n#     melspec = utils.min_max_denormalize(fake_imgs, min_val, max_val)\n#     melspec = torch.permute(melspec, (0, 1, 3, 2))\n#     melspec = melspec.squeeze(1) \n#     gen_audio = utils.inference(melspec, MAX_WAV_VALUE, Vocoder, h, device)\n#     gen_audio = torch.tensor(gen_audio, dtype=torch.float32)\n#     target_length = 65536\n#     pad_right = target_length - gen_audio.size(1)\n#     # Apply padding to the end\n#     gen_audio = F.pad(gen_audio, (0, pad_right)) # Only pad the end\n#     print(gen_audio.shape)", "repo_name": "Reinliu/STGAN", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6890, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 97, "usage_type": "attribute"}, {"api_name": "librosa.filters.mel", "line_number": 97, "usage_type": "call"}, {"api_name": "librosa.filters", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 110, "usage_type": "call"}, {"api_name": "librosa.fft_frequencies", "line_number": 113, "usage_type": "call"}, {"api_name": "librosa.A_weighting", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 124, 
"usage_type": "call"}, {"api_name": "numpy.load", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 130, "usage_type": "attribute"}, {"api_name": "librosa.filters.mel", "line_number": 130, "usage_type": "call"}, {"api_name": "librosa.filters", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 144, "usage_type": "call"}, {"api_name": "librosa.fft_frequencies", "line_number": 147, "usage_type": "call"}, {"api_name": "librosa.A_weighting", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "8031405537", "text": "# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('statisticscore', '0011_auto_20150820_1639'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='activedebate',\n name='active_debate',\n field=models.CharField(max_length=8, null=True, blank=True),\n ),\n ]\n", "repo_name": "eyp-developers/statistics", "sub_path": "statisticscore/migrations/0012_auto_20150822_1237.py", "file_name": "0012_auto_20150822_1237.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "18565453607", "text": "#import OpenCV module\nimport cv2\n#import os module for reading training data directories and paths\nimport os, sys\n#import numpy to convert python lists to numpy arrays as \n#it is needed by OpenCV face recognizers\nimport numpy as np\n\nimport re\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected, flatten\nfrom tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d\nfrom tflearn.layers.merge_ops import merge\nfrom tflearn.layers.normalization import local_response_normalization\nfrom tflearn.layers.estimator import regression\nfrom os.path import isfile, join\n\nimport random\nimport subprocess, csv\nfrom constants import *\nfrom PIL import Image, ImageDraw, ImageFont\nglobal face, faces\nimport time\n\nrecognizer_gen = cv2.face.LBPHFaceRecognizer_create()\n\nrecognizer_gen.read(\"genero.yml\")\nprint(\"creado_genero\")\nsubjects_gen = [\"femenino\" , \"masculino\"]\n\nrecognizer_edad = cv2.face.LBPHFaceRecognizer_create()\n\nrecognizer_edad.read(\"edad.yml\")\nprint(\"creado_edad\")\nsubjects_edad = [\"adulto\" , \"joven\", \"viejo\",\"nino\" ]\nultim_faces=None\nfr=0\nfem = porc_fem = mas =porc_masc= adulto=joven=viejo=nino=p_adulto=p_joven=p_nino=p_viejo=0\nneutral =feliz =triste =p_neutral=p_feliz=p_triste=0\n#################\n\nface_cascade 
= cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\n\n#font = cv2.FONT_HERSHEY_SIMPLEX\n\n\nvideo_capture = cv2.VideoCapture(0)\nfeelings_faces = []\n\n\n#####################\ndef porc_genero(genero):\n\tglobal fem\n\tglobal mas\n\tglobal porc_fem\n\tglobal porc_masc\n\tif(genero==\"femenino\"):\n\t\tfem+=1\n\telif(genero==\"masculino\"):\n\t\tmas+=1\n\tporc_fem=int(float(fem)/(fem+mas)*100)\n\tporc_masc=100-porc_fem\n\n\ndef porc_edad(edad):\n\tglobal adulto\n\tglobal joven\n\tglobal viejo\n\tglobal nino\n\tglobal p_adulto\n\tglobal p_joven\n\tglobal p_nino\n\tglobal p_viejo\n\tif (edad==\"adulto\"):\n\t\tadulto+=1\n\n\telif (edad==\"joven\"):\n\t\tjoven+=1\n\telif (edad==\"viejo\"):\n\t\tviejo+=1\n\t\t\n\telif (edad==\"nino\"):\n\t\tnino+=1\n\n\tp_adulto=int(float(adulto)/(adulto+joven+viejo+nino)*100)\n\tp_joven=int(float(joven)/(adulto+joven+viejo+nino)*100)\n\tp_viejo=int(float(viejo)/(adulto+joven+viejo+nino)*100)\n\tp_nino=int(float(nino)/(adulto+joven+viejo+nino)*100)\n\t\n\ndef porc_emo(emocion):\n\tglobal neutral\n\tglobal feliz\n\tglobal triste\n\tglobal p_neutral\n\tglobal p_feliz\n\tglobal p_triste\n\n\tif (emocion==\"Feliz\"):\n\t\tfeliz+=1\n\n\telif (emocion==\"Neutral\"):\n\t\tneutral+=1\n\telif (emocion==\"Triste\"):\n\t\ttriste+=1\n\n\tp_feliz=int(float(feliz)/(feliz+neutral+triste)*100)\n\tp_neutral=int(float(neutral)/(feliz+neutral+triste)*100)\n\tp_triste=int(float(triste)/(feliz+neutral+triste)*100)\n\n\t\n\n\n\ndef draw_rectangle(img, rect):\n\t(x, y, w, h) = rect\n\tcv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n \n#function to draw text on give image starting from\n#passed (x, y) coordinates. \ndef draw_text(img, text, x, y):\n\tcv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 0.9, (0, 255, 0), 2)\n\n\n\ndef detect_face(img):\n #convert the test image to gray image as opencv face detector expects gray images\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n \n #load OpenCV face detector, I am using LBP which is fast\n #there is also a more accurate but slow Haar classifier\n\tface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n\tfaces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5);\n \n #if no faces are detected then return original img\n\tif (len(faces) == 0):\n\t\treturn None\n #num_face=0\n #for (x,y,w,h) in faces:\n #extract the face area\n # (x, y, w, h) = faces[num_face]\n # num_face+=\n #return only the face part of the image\n #return gray[y:y+w, x:x+h], faces[num_face]\n\treturn faces\n\n\ndef predict(test_img,hora):\n\tglobal ultim_faces\n\tglobal fr\n #make a copy of the image as we don't want to chang original image\n\timg = test_img\n\tgray2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #detect face from the image\n\tfaces = detect_face(img)\n\t#print(faces)\n\t#print(\"ultima: \")\n\t#print(ultim_faces)\n\tif(fr>=5):\n\t\tfr=0\n\t\tif (faces is not None and ultim_faces is not None):\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\tfor (a,b,c,d) in ultim_faces:\n\t\t\t\t\tif (y<(b+0.2*b) and y>(b-0.2*b) and x<(a+0.2*a) and x>(a-0.2*a)):\n\t\t\t\t\t\tprint(\"misma cara\")\n\t\t\t\t\t\trect=(x,y,w,h)\n\t\t\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\t#print(label)\n\t\t\t\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\t\t\tlabel_text_gen = 
subjects_gen[label_gen[0]]\n\t\t\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t\t \n\t\t\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], rect[1]-5)\n\t\t\t\t\telse:\n\t\t\t\t\t\t\n\t\t\t\t\t\trect=(x,y,w,h)\n\t\t\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\t#print(label)\n\t\t\t\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\t\t\tlabel_text_gen = subjects_gen[label_gen[0]]\n\t\t\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t\t \n\t\t\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], rect[1]-5)\n\t\t\t\t\t\tporc_genero(label_text_gen)\n\t\t\t\t\t\tporc_edad(label_text_edad)\n\t\t\t\t\t\tporc_emo(text_emo)\n\t\t\t\t\t\tcsv_writer.writerow([label_text_gen,label_text_edad,text_emo, str(hora)])\n\t\t\t\t\t\tprint(\"segunda\")\n\t\t\t\t\t\tcsvfile.flush()\n\t\t\tultim_faces = faces\n\t\telif(ultim_faces is None and faces is not None):\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\t\trect=(x,y,w,h)\n\t\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t#print(label)\n\t\t\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\t\tlabel_text_gen = subjects_gen[label_gen[0]]\n\t\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t \n\t\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], rect[1]-5)\n\t\t\t\t\tporc_genero(label_text_gen)\n\t\t\t\t\tporc_edad(label_text_edad)\n\t\t\t\t\tprint(text_emo)\n\t\t\t\t\tporc_emo(text_emo)\n\t\t\t\t\tcsv_writer.writerow([label_text_gen,label_text_edad,text_emo, str(hora)])\n\t\t\t\t\tcsvfile.flush()\n\t\t\tultim_faces = faces\n\t\t\t\n\t\telif(faces is None):\n\t\t\tprint(\"no se encontraron rostros\")\n\t\t\tultim_faces = faces\n\n\tif (faces is not None and ultim_faces is not None):\n\t\tif (len(faces)!= len(ultim_faces)):\n\t\t\tfr+=1\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\trect=(x,y,w,h)\n\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t#print(label)\n\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\tlabel_text_gen = subjects_gen[label_gen[0]]\n\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t\t \n\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], 
rect[1]-5)\n\t\t\tultim_faces = faces\n\n\n\t\tif (len(faces)== len(ultim_faces)):\n\t\t\tfr=0\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\t\t\trect=(x,y,w,h)\n\t\t\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\t#print(label)\n\t\t\t\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\t\t\tlabel_text_gen = subjects_gen[label_gen[0]]\n\t\t\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t\t \n\t\t\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], rect[1]-5)\n\t\t\t\t\t\t\n\t\t\tultim_faces = faces\n\t\n\telif(ultim_faces is None):\n\t\tif(faces is None):\n\t\t\tfr+=1\n\t\t\tultim_faces=None\n\t\t\n\t\t\n\t\telif (faces is not None):\n\t\t\tfr+=1\n\t\t\t\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\t\t\trect=(x,y,w,h)\n\t\t\t\t\t\tlabel_gen= recognizer_gen.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\tlabel_edad= recognizer_edad.predict(gray2[y:y+h,x:x+w])\n\t\t\t\t\t\t#print(label)\n\t\t\t\t\t\t\t#get name of respective label returned by face recognizer\n\t\t\t\t\t\tlabel_text_gen = subjects_gen[label_gen[0]]\n\t\t\t\t\t\tlabel_text_edad = subjects_edad[label_edad[0]]\n\t\t\t\t\t\tresult = network.predict(format_image(img))\n\t\t\t\t\t\t\t\t \n\t\t\t\t\t\ttext_emo = EMOTIONS[np.argmax(result)]\n\t\t\t\t\t\t\t\t #draw a rectangle around face detected\n\t\t\t\t\t\tdraw_rectangle(img, rect)\n\t\t\t\t\t\t\t#draw name of predicted person\n\t\t\t\t\t\tdraw_text(img, label_text_gen+\" \"+label_text_edad+\" \"+text_emo, rect[0], rect[1]-5)\n\t\t\t\t\t\t\n\t\t\t#ultim_faces = faces\n\telif(faces is None):\n\t\tfr+=1\n\t\tultim_faces=None\n\t\t\n\n\treturn img\n\n############################################\nclass EmotionRecognition:\n\n def build_network(self):\n # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py\n print('[+] Building CNN')\n self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])\n self.network = conv_2d(self.network, 64, 5, activation='relu')\n #self.network = local_response_normalization(self.network)\n self.network = max_pool_2d(self.network, 3, strides=2)\n self.network = conv_2d(self.network, 64, 5, activation='relu')\n self.network = max_pool_2d(self.network, 3, strides=2)\n self.network = conv_2d(self.network, 128, 4, activation='relu')\n self.network = dropout(self.network, 0.3)\n self.network = fully_connected(self.network, 3072, activation='relu')\n self.network = fully_connected(\n self.network, len(EMOTIONS), activation='softmax')\n self.network = regression(\n self.network,\n optimizer='momentum',\n loss='categorical_crossentropy'\n )\n self.model = tflearn.DNN(\n self.network,\n checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',\n max_checkpoints=1,\n tensorboard_verbose=2\n )\n \n self.load_model()\n\n \n def predict(self, image):\n if image is None:\n return None\n image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])\n return self.model.predict(image)\n\n def save_model(self):\n self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))\n print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)\n\n def load_model(self):\n if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):\n self.model.load(join(SAVE_DIRECTORY, 
SAVE_MODEL_FILENAME))\n            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)\n\n####################################\n\ndef format_image(image):\n    global face, faces\n\n    if len(image.shape) > 2 and image.shape[2] == 3:\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    else:\n        # cv2.CV_LOAD_IMAGE_GRAYSCALE was removed in OpenCV 3; use IMREAD_GRAYSCALE\n        image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)\n\n    faces = face_cascade.detectMultiScale(\n        image,\n        scaleFactor=1.3,\n        minNeighbors=5\n    )\n    # return None when no face was found\n    if not len(faces) > 0:\n        return None\n    max_area_face = faces[0]\n    for face in faces:\n        if face[2] * face[3] > max_area_face[2] * max_area_face[3]:\n            max_area_face = face\n    # crop the image to the face region; a face is (x, y, w, h),\n    # so rows span the height (face[3]) and columns span the width (face[2])\n    face = max_area_face\n    image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]\n\n    # resize the image to the network input size\n    try:\n        image = cv2.resize(image, (SIZE_FACE, SIZE_FACE),\n                           interpolation=cv2.INTER_CUBIC) / 255.\n    except Exception:\n        print(\"[+] Problem during resize\")\n        return None\n\n    return image\n\n#############################################\nnetwork = EmotionRecognition()\nnetwork.build_network()\na = 0\nultim_faces = None\n# check_output() returns bytes; decode and strip so the timestamp is usable in filenames\nhora = subprocess.check_output([\"date\", \"+%H-%M-%S\"]).decode().strip()\n# load the emotion overlay images once instead of on every frame\nfor index, emotion in enumerate(EMOTIONS):\n\tfeelings_faces.append(cv2.imread(emotion, -1))\n#namedWindow(\"img\" , CV_WINDOW_AUTOSIZE)\nwith open('csv_files/'+str(hora)+'.csv', 'w') as csvfile:\n\tcsv_writer = csv.writer(csvfile)\n\twhile a < 80:\n\t\thora = subprocess.check_output([\"date\", \"+%H-%M-%S\"]).decode().strip()\n\t\tret, test_img1 = video_capture.read()\n\t\tprint(\"Predicting images...\")\n\n\t\t# perform a prediction\n\t\tpredicted_img1 = predict(test_img1, hora)\n\t\tprint(\"male \" + str(porc_masc))\n\t\tprint(\"young \" + str(p_joven))\n\t\tprint(\"neutral \" + str(p_neutral))\n\t\t#cv2.imshow('img', predicted_img1)\n\n\t\ta += 1\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n\n#miguel perez migaperezber@hotmail.com\n", "repo_name": "migaperezber1/reconocimiento", "sub_path": "reco.py", "file_name": "reco.py", "file_ext": "py", "file_size_in_byte": 13282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 122, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 128, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 157, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 179, "usage_type": 
"call"}, {"api_name": "numpy.argmax", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 295, "usage_type": "call"}, {"api_name": "tflearn.layers.core.input_data", "line_number": 315, "usage_type": "call"}, {"api_name": "tflearn.layers.conv.conv_2d", "line_number": 316, "usage_type": "call"}, {"api_name": "tflearn.layers.conv.max_pool_2d", "line_number": 318, "usage_type": "call"}, {"api_name": "tflearn.layers.conv.conv_2d", "line_number": 319, "usage_type": "call"}, {"api_name": "tflearn.layers.conv.max_pool_2d", "line_number": 320, "usage_type": "call"}, {"api_name": "tflearn.layers.conv.conv_2d", "line_number": 321, "usage_type": "call"}, {"api_name": "tflearn.layers.core.dropout", "line_number": 322, "usage_type": "call"}, {"api_name": "tflearn.layers.core.fully_connected", "line_number": 323, "usage_type": "call"}, {"api_name": "tflearn.layers.core.fully_connected", "line_number": 324, "usage_type": "call"}, {"api_name": "tflearn.layers.estimator.regression", "line_number": 326, "usage_type": "call"}, {"api_name": "tflearn.DNN", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 362, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 362, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 364, "usage_type": "call"}, {"api_name": "cv2.CV_LOAD_IMAGE_GRAYSCALE", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 390, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 406, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 409, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 412, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 415, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 443, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 448, "usage_type": "call"}]} +{"seq_id": "10836637066", "text": "import json\n\nimport jsonpickle\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom json import decoder\nimport jsonpickle\n\napp = Flask(__name__)\n\n\nclass Words_stat:\n text = \"\"\n words = []\n count = 0\n\n def __init__(self, text):\n self.text = text\n self.words = self.text.split(' ')\n self.count = len(self.words)\n\n def default(self, o):\n return o.__dict__\n\n def json(self):\n return jsonpickle.encode(self)\n\n##=== = = = = Главная программа === == =\n@app.route(\"/\")\ndef index():\n return \"
Hello! Enter a string
\"\n\n@app.route(\"/words\", methods = ['POST'])\ndef index2():\n words = request.form['words']\n data = []\n\n try:\n dec = json.loads( words)\n print (\"wait data = \", dec [\"data\"] )\n obj = Words_stat(dec[\"data\"])\n\n print(\"obj = \", obj)\n data= obj.json()\n\n except:\n\n obj = Words_stat(words)\n print(\"obj = \", obj)\n\n data = obj.json()\n\n return data\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n\n\n", "repo_name": "shaninandrew/words", "sub_path": "MS_Words.py", "file_name": "MS_Words.py", "file_ext": "py", "file_size_in_byte": 1284, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "jsonpickle.encode", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "29256826263", "text": "from __future__ import absolute_import\nimport logging\nimport pandas as pd\nimport os\nimport sys\nsys.path.append(os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + \"/../../..\"))\n\nfrom scripts.create import insert_many_rows\nfrom taipan.core import polar2cart\n\n\ndef execute(cursor, fields_file=None):\n \"\"\"Load field pointings from file to database\"\"\"\n\n logging.info(\"Loading Centroids\")\n\n if not fields_file:\n logging.info(\"No tiling file passed - aborting loading centroids\")\n return\n\n # Get centroids\n with open(fields_file, 'r') as fileobj:\n datatable = pd.read_csv(fileobj, delim_whitespace=True)\n values = [[index, row['ra'], row['dec']]\n + list(polar2cart((row['ra'], row['dec'])))\n for index, row in datatable.iterrows()]\n\n columns = [\"FIELD_ID\", \"RA\", \"DEC\", \"UX\", \"UY\", \"UZ\"]\n\n # Insert into database\n if cursor is not None:\n insert_many_rows(cursor, \"field\", values, columns=columns)\n logging.info('Loaded Centroids')\n else:\n logging.info('No DB to write to - returning values')\n return values\n\n return\n", "repo_name": "Samreay/TaipanDB", "sub_path": "resources/0.0.1/ingest/loadCentroids.py", "file_name": "loadCentroids.py", "file_ext": "py", "file_size_in_byte": 1128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "taipan.core.polar2cart", "line_number": 25, "usage_type": "call"}, {"api_name": "scripts.create.insert_many_rows", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "43910520562", "text": "import re\nimport subprocess\nimport openpyxl\nfrom openpyxl.styles import Font, 
PatternFill, NamedStyle, Side, Border\n\n\ndef reader(filename):\n    users_list = []\n    uniq_1 = []\n    uniq_2 = []\n    uniq_3 = []\n    with open(filename, 'r') as f:\n        for line in f:\n            result = re.split(' ', line, maxsplit=3)\n            users_result = [result[0], result[2]]\n            users_list.append(users_result)\n        for item in users_list:\n            count_item = users_list.count(item)\n            users_result_count = [item[0], item[1], count_item]\n            uniq_1.append(users_result_count)\n        for item in uniq_1:\n            if item not in uniq_2:\n                uniq_2.append(item)\n        for item in uniq_2:\n            # 'Зашел' means 'logged in'; keep the Russian literal because it must match the source log\n            if item[1] == 'Зашел':\n                ad_user_name = item[0]\n                cmd = f\"Get-ADUser -Identity {ad_user_name} -Properties * | select SamAccountName, Name, Company, Department, Title, employeeType\"\n                info_from_ad = run(cmd)\n                user_and_ad_info = [j for i in [item, info_from_ad] for j in i]\n                uniq_3.append(user_and_ad_info)\n                print(user_and_ad_info)\n    return save_result(uniq_3)\n\n\ndef run(cmd):\n    rgx = r\"\\:\\s.*\\n\"\n    subprocess.run([\"powershell\", \"-Command\", \"chcp 1251\"], capture_output=True)\n    completed = subprocess.run([\"powershell\", \"-Command\", cmd], capture_output=True, text=True)\n    subprocess_output = completed.stdout\n    fixed_subprocess_output = re.findall(rgx, subprocess_output)\n    result_subprocess = []\n    for item in fixed_subprocess_output:\n        replace_item = item.replace(\": \", \"\").replace('\\n', '')\n        result_subprocess.append(replace_item)\n    return result_subprocess\n\n\ndef rename_column(filename_xlsx):\n    book = openpyxl.load_workbook(filename=filename_xlsx)\n    sheet = book['Users']\n    # row indices are 1-based in openpyxl, so insert the header row before row 1\n    sheet.insert_rows(1)\n    sheet['A1'].value = 'User'\n    sheet['B1'].value = 'Status'\n    sheet['C1'].value = 'Login count'\n    sheet['D1'].value = 'SamAccountName'\n    sheet['E1'].value = 'User name'\n    sheet['F1'].value = 'Organization'\n    sheet['G1'].value = 'Department'\n    sheet['H1'].value = 'Job title'\n    sheet['I1'].value = 'Directorate'\n\n    sheet.column_dimensions['A'].width = 15\n    sheet.column_dimensions['B'].width = 12\n    sheet.column_dimensions['C'].width = 23\n    sheet.column_dimensions['D'].width = 20\n    sheet.column_dimensions['E'].width = 35\n    sheet.column_dimensions['F'].width = 45\n    sheet.column_dimensions['G'].width = 60\n    sheet.column_dimensions['H'].width = 30\n    sheet.column_dimensions['I'].width = 30\n\n    azzcode_style = NamedStyle(name=\"azzcode_style\")\n    azzcode_style.font = Font(b=True, size=14, color=\"DD0000\")\n    azzcode_style.fill = PatternFill(\"solid\", fgColor=\"FFFF99\")\n    side = Side(style='medium', color=\"00EEDD\")\n    azzcode_style.border = Border(bottom=side)\n    sheet['A1'].style = azzcode_style\n    sheet['B1'].style = azzcode_style\n    sheet['C1'].style = azzcode_style\n    sheet['D1'].style = azzcode_style\n    sheet['E1'].style = azzcode_style\n    sheet['F1'].style = azzcode_style\n    sheet['G1'].style = azzcode_style\n    sheet['H1'].style = azzcode_style\n    sheet['I1'].style = azzcode_style\n\n    # the table spans nine columns (A-I)\n    sheet.auto_filter.ref = 'A1:I999'\n\n    book.save(filename_xlsx)\n\n\ndef save_result(usr):\n    book = openpyxl.Workbook()\n    book.remove(book.active)\n    sheet = book.create_sheet('Users')\n    for row in usr:\n        sheet.append(row)\n\n    book.save('./result_output.xlsx')\n    return rename_column('./result_output.xlsx')\n\n\ndef main():\n    filename = './USERLOG.TXT'\n    reader(filename)\n\n\nif __name__ == \"__main__\":\n    main()\n", "repo_name": "mksmvskv/parser_konsultantpluse_logs", "sub_path": "count_of_user_logins.py", "file_name": "count_of_user_logins.py", "file_ext": "py", "file_size_in_byte": 3748, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.split", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 38, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 50, "usage_type": "call"}, {"api_name": "openpyxl.styles.NamedStyle", "line_number": 73, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 74, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 75, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 76, "usage_type": "call"}, {"api_name": "openpyxl.styles.Border", "line_number": 77, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "17750367469", "text": "daicoding='utf-8'\nimport os\nfrom torch.utils.data import DataLoader\nfrom lib.dataset import Data\nimport torch.nn.functional as F\nimport torch\nimport cv2\nimport time\nfrom net import Mynet\nimport numpy as np\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nif __name__ == '__main__':\n model_path= 'model/unalign.pth'\n out_path = './VT821'\n data = Data(root='./data/VT821_unalign/',mode='test')\n loader = DataLoader(data, batch_size=1,shuffle=False)\n net = Mynet().cuda()\n print('loading model from %s...' % model_path)\n net.load_state_dict(torch.load(model_path))\n if not os.path.exists(out_path): os.mkdir(out_path)\n time_s = time.time()\n img_num = len(loader)\n net.eval()\n with torch.no_grad():\n for rgb, t, _, (H, W), name in loader:\n print(name[0])\n scores = net(rgb.cuda().float(), t.cuda().float())\n score = F.interpolate(scores[-1], size=(H, W), mode='bilinear', align_corners=True)\n pred = np.squeeze(score.cpu().data.numpy())\n cv2.imwrite(os.path.join(out_path, name[0][:-4] + '.png'), 255 * pred)\n time_e = time.time()\n print('speed: %f FPS' % (img_num / (time_e - time_s)))\n\n\n\n", "repo_name": "lz118/Deep-Correlation-Network", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", "api": [{"api_name": "lib.dataset.Data", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 16, "usage_type": "call"}, {"api_name": "net.Mynet", "line_number": 17, "usage_type": "call"}, {"api_name": "net.load_state_dict", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "net.eval", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "3677831528", "text": "#!/usr/bin/python3\n\"\"\"Start a flask app\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\n\n\n@app.route('/cities_by_states', strict_slashes=False)\ndef cities_by_states():\n \"\"\"states and cities lists\"\"\"\n states = storage.all(State)\n return render_template(\"8-cities_by_states.html\", states=states)\n\n\n@app.teardown_appcontext\ndef close_storage(exception):\n \"\"\"close the current SQLAlchemy session\"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=\"5000\")\n", "repo_name": "svilladaniel/AirBnB_clone_v2", "sub_path": "web_flask/8-cities_by_states.py", "file_name": "8-cities_by_states.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "models.storage.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 14, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "models.storage.close", "line_number": 21, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "20605423114", "text": "\"\"\"ESL-python ex4.9 \nA program to perform a quadratic discriminant analysis by fitting\na separate Gaussian model per class on the vowel data, and compute\nthe misclassification error for the test data.\n\nCreated on Oct 20th. 
2018\n@author Yan\n\"\"\"\n\n# GaussianNB fits one Gaussian per class with a diagonal covariance,\n# i.e. a naive-Bayes simplification of the separate-Gaussian (QDA) fit\nimport seaborn as sns; sns.set()\nfrom sklearn.naive_bayes import GaussianNB\n\ndef readData(file_name):\n    # read data from file and save it in float lists\n    with open(file_name) as f:\n        all_lines = f.read().splitlines()[1:]\n    x_input = []\n    y_output = []\n    temp_list = []\n    for line in all_lines:\n        line = line.strip().split(',')\n        y_output.append(float(line[1]))\n        for number in line[2:]:\n            temp_list.append(float(number))\n        x_input.append(temp_list)\n        temp_list = []\n    return x_input, y_output\n\n# train and predict\ntrain_x, train_y = readData('vowel.train')\ntest_x, test_y = readData('vowel.test')\nmodel = GaussianNB()\nmodel.fit(train_x, train_y)\npredict_y = list(model.predict(test_x))\n\n# compare the predictions with the actual labels\ntrue_num = 0\nfor i in range(len(test_x)):\n    if test_y[i] == predict_y[i]:\n        true_num += 1\ntrue_rate = true_num / len(test_x)\nprint('misclassification error:', 1 - true_rate)\n", "repo_name": "stevenzolo/SML_hw", "sub_path": "ex4/vowel_recognize_Gaussian.py", "file_name": "vowel_recognize_Gaussian.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "seaborn.set", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "6175450792", "text": "import json\nimport os\nfrom jsonschema import validate\nimport logging\n\n\ndef open_schema():\n    \"\"\"Read every schema file and collect the parsed schemas in a list\"\"\"\n    list_file = os.listdir(path='schema')\n    app_list = []\n    for i in list_file:\n        with open(f'schema/{i}', 'r', encoding='utf-8') as f:\n            text = json.load(f)\n        app_list.append(text)\n    return app_list\n\n\ndef open_event():\n    \"\"\"Open every malformed event JSON file and write the validation errors to the log\"\"\"\n    schemas = open_schema()\n    for i in os.listdir(path='event'):\n        with open(f'event/{i}', 'r', encoding='utf-8') as f:\n            text = json.load(f)\n        # check the event against every schema and log each failure with its traceback\n        for schema in schemas:\n            try:\n                validate(text, schema)\n            except Exception:\n                logging.exception(\"Oops:\")\n\n\nlogging.basicConfig(level=logging.DEBUG, filename='myapp.log')\n\n\nif __name__ == '__main__':\n    open_event()\n", "repo_name": "v-svetlakov/script_for_Welltory", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1256, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.listdir", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 28, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "35195802821", "text": "from collections import deque\n\n\nclass QueryController:\n    def __init__(self, game_dict):\n        self.game_dict = game_dict\n\n    @staticmethod\n    def is_duplicate_data(game_key):\n        if game_key != \"\":\n            query = f\"SELECT GAME_ID FROM GAMES WHERE 
GAME_ID='{game_key}'\"\n            return query\n        else:\n            return \"\"\n\n    def game_data_insert(self):\n        game_dict = self.game_dict\n        if len(game_dict) >= 10:\n            # replace special characters\n            game_dict['description'] = game_dict['description'].replace(\"\\'\", \"\\\"\")\n            game_dict['game_name'] = game_dict['game_name'].replace(\"\\'\", \"\\\"\")\n            # GAMES insert statement\n            insert_sql = f\"INSERT IGNORE INTO GAMES (GAME_ID, GAME_NAME, GAME_INFO, LAUNCH_DATE, EVALUATION, IMG_URL, VIDEO_URL, DEV_COMPANY, DISTRIBUTOR) VALUES ('{game_dict['game_id']}','{game_dict['game_name']}', '{game_dict['description']}', '{game_dict['launch_date']}', '{game_dict['evaluation']}', '{game_dict['img_url']}', '{game_dict['video_url']}', '{game_dict['company']}', '{game_dict['distributor']}');\"\n            return insert_sql\n        else:\n            print(\"Invalid dictionary.\")\n            return \"\"\n\n    def tag_data_insert(self):\n        game_dict = self.game_dict\n\n        # TAGS insert statement\n        tag_query = \"INSERT IGNORE INTO TAGS (TAG_ID, TAG_NAME) VALUES \"\n\n        # append the TAGS insert VALUES\n        if len(game_dict) >= 10:\n            tags = deque(game_dict['tags'])\n            # loop over all tags\n            while True:\n                # the last tag closes the statement\n                if len(tags) > 1:\n                    tag = tags.popleft()\n                    tag_query = tag_query + f\"('{tag[0]}', '{tag[1]}'), \"\n                else:\n                    tag = tags.popleft()\n                    tag_query = tag_query + f\"('{tag[0]}', '{tag[1]}');\"\n                    break\n\n            # return the TAGS insert statement\n            return tag_query\n        else:\n            print(\"Invalid dictionary.\")\n            return \"\"\n\n    def game_tags_insert(self):\n        game_dict = self.game_dict\n\n        # GAME_TAGS query\n        game_tag_query = \"INSERT IGNORE INTO GAME_TAGS (GAME_ID, TAG_ID) VALUES \"\n\n        # append the GAME_TAGS insert VALUES\n        if len(game_dict) >= 10:\n            tags = deque(game_dict['tags'])\n\n            while True:\n                if len(tags) > 1:\n                    tag = tags.popleft()\n                    game_tag_query = game_tag_query + f\"('{game_dict['game_id']}', '{tag[0]}'), \"\n                else:\n                    # pop the last tag too; reusing the previous one skipped the final row\n                    tag = tags.popleft()\n                    game_tag_query = game_tag_query + f\"('{game_dict['game_id']}', '{tag[0]}');\"\n                    break\n\n            return game_tag_query\n        else:\n            print(\"Invalid dictionary.\")\n            return \"\"\n", "repo_name": "dldydtjs2965/steammer-api", "sub_path": "database/database_controller.py", "file_name": "database_controller.py", "file_ext": "py", "file_size_in_byte": 2850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "5854342739", "text": "\nimport os, pathlib\nimport numpy as np\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\nimport spm1d\n\n\ndef load_csv(fname):\n\ta = np.loadtxt(fname, delimiter=',')\n\tg,y = a[:,0], a[:,1:]\n\ty0,y1 = y[g==0], y[g==1]\n\treturn y0,y1\n\n\n\n#(0) Load the 20 datasets and analyze using SPM:\ndirREPO = pathlib.Path( __file__ ).parent.parent\ndirDATA = os.path.join( dirREPO, 'Data', '20datasets')\nalpha = 0.05\nY0,Y1 = [], [] # datasets\nT = [] # t-value continua\ntcrit_spm = [] # critical t values\nfor i in range(20):\n\tfnameCSV = os.path.join(dirDATA, f'{i+1}.csv')\n\ty0,y1 = load_csv( fnameCSV )\n\tti = spm1d.stats.ttest2(y0, y1).inference(alpha, two_tailed=True)\n\ttcrit_spm.append( ti.zstar )\n\tY0.append( y0 )\n\tY1.append( y1 )\n\tT.append( ti.z )\n\n\n\n#(1) Load the LAAST results (see \"./R/sim_2samp_20datasets.R\")\nfnameLAAST = os.path.join( dirREPO, 'Data', '20datasets-laast-results.csv')\na = np.loadtxt(fnameLAAST, delimiter=',', skiprows=1)\npcrit_laast = a[:,1]\ntcrit_laast = stats.t.isf( pcrit_laast, 8 ) # approximate LAAST critical threshold\n\n\n\n\n#(2) 
Plot:\nplt.close('all')\nfig,AX = plt.subplots( 5, 8, figsize=(12,8) )\nAX0 = AX[:,::2] # axes for datasets\nAX1 = AX[:,1::2] # axes for t values\nfontname = 'Helvetica'\ncolor_rft = 'red'\ncolor_laast = (0.3,0.3,0.7)\nfor i,(ax0,ax1,y0,y1,t) in enumerate( zip(AX0.ravel(), AX1.ravel(), Y0, Y1, T) ):\n\t# plot dataset:\n\tax0.plot(y0.T, 'k', lw=0.5)\n\tax0.plot(y1.T, 'c', lw=0.5)\n\t# plot statistical results:\n\tax1.plot( t, color='0.7', lw=3, label='t value' )\n\tax1.axhline( 0, color='k', ls='-', lw=0.3 )\n\tax1.axhline( tcrit_spm[i], color=color_rft, ls='--', label='RFT' )\n\tax1.axhline( tcrit_laast[i], color=color_laast, ls='--', label='LAAST' )\n\t# dataset label:\n\tax0.text(0.5, 1.1, f'Dataset {i+1}', color='k', fontweight='bold', ha='center', size=10, transform=ax0.transAxes, name=fontname)\n\t# H0 rejection checks:\n\ttmax = t.max()\n\tif tmax > tcrit_spm[i]:\n\t\tax1.text(0.5, 0.20, 'Significant', color=color_rft, fontweight='bold', ha='center', size=8, transform=ax1.transAxes, name=fontname)\n\tif tmax > tcrit_laast[i]:\n\t\tax1.text(0.5, 0.05, 'Significant', color=color_laast, fontweight='bold', ha='center', size=8, transform=ax1.transAxes, name=fontname)\n\tif (tmax > tcrit_spm[i]) or (tmax > tcrit_laast[i]):\n\t\tax1.set_facecolor( '0.8' )\nplt.setp(AX, ylim=(-5, 5))\nleg0 = AX[0,0].legend( [AX[0,0].lines[0], AX[0,0].lines[-1]], ['Group A', 'Group B'], bbox_to_anchor=(0.91,0.25), loc='upper right' )\nleg1 = AX[0,1].legend( bbox_to_anchor=(0.91,0.25), loc='upper right' )\nplt.setp( leg0.get_texts() + leg1.get_texts(), name=fontname, fontsize=8 )\n[plt.setp(ax, xticklabels=(), yticklabels=()) for ax in AX.ravel()[1:]]\n[plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), size=7, name=fontname) for ax in AX.ravel()]\nplt.tight_layout()\nplt.show()\n\n\n\n# save:\nfnamePDF = os.path.join( dirREPO, 'Figures', 'fig_2samp_20datasets.pdf' )\nplt.savefig( fnamePDF )\n", "repo_name": "0todd0000/laast-eval", "sub_path": "Python/fig_2samp_20datasets.py", "file_name": "fig_2samp_20datasets.py", "file_ext": "py", "file_size_in_byte": 2993, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.loadtxt", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "spm1d.stats.ttest2", "line_number": 27, "usage_type": "call"}, {"api_name": "spm1d.stats", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.stats.t.isf", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.setp", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "41787621045", "text": "# -*- coding: utf-8 -*-\nimport re\nimport json\nfrom collections import namedtuple\nfrom scrapy import FormRequest, Field, Item, Request\nfrom product_ranking.spiders import BaseProductsSpider\n\n\nclass InstagramUsersItem(Item):\n username = Field()\n followers = Field()\n total_posts = Field()\n posts = Field()\n\n # Search metadata.\n site = Field() # String.\n search_term = Field() # String.\n ranking = Field() # Integer.\n total_matches = Field() # Integer.\n results_per_page = Field() # Integer.\n scraped_results_per_page = Field() # Integer.\n search_term_in_title_exactly = Field()\n search_term_in_title_partial = Field()\n search_term_in_title_interleaved = Field()\n _statistics = Field()\n\n\nclass InstagramCrawlerSpider(BaseProductsSpider):\n name = \"instagram_users_products\"\n allowed_domains = [\"instagram.com\"]\n posts_url = 'https://www.instagram.com/query/'\n\n def __init__(self, *args, **kwargs):\n super(InstagramCrawlerSpider, self).__init__(\n site_name=\"instagram.com\",\n *args, **kwargs)\n self.product_url = kwargs['product_url']\n\n self.comments = []\n self.likes = []\n self.num_pages = 1\n\n @staticmethod\n def valid_url(url):\n if not re.findall(r\"http(s){0,1}\\:\\/\\/\", url):\n url = \"http://\" + url\n return url\n\n def start_requests(self):\n yield Request(url=self.valid_url(self.product_url),\n meta={'remaining': 99999,\n 'search_term': ''},\n callback=self._parse_single_product)\n\n def _parse_single_product(self, response):\n # extracting json data from