diff --git "a/1497.jsonl" "b/1497.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1497.jsonl"
@@ -0,0 +1,388 @@
+{"seq_id":"24285017621","text":"# For all preprocessing methods applied on a feature, $D_{*j}$, in $D$, what is the change in feature spread.\n# Input: D - dataframe\n\nimport pymongo\nimport pandas as pd\nimport pprint\nimport time\nimport sys\nimport random\n\ndef get_random_feature():\n\t# Get output_entities collection\n\toutput_entities = db['output_entities']\n\n\t# Get random document on output_entities collection\n\trandom_ent = list(output_entities.aggregate([{'$sample': {'size': 1}}]))\n\t\n\t# Get feature_name\n\tfeature_name = random_ent[0]['attributes']['feature_name']\n\t#feature_name = 'checking'\n\n\treturn feature_name\n\ndef get_dataset_spread(invalid_items, new_items):\n\t# Get all preprocessing methods id:\n\tact_new = [e['_id'] for e in new_items]\n\tact_invalid = [e['_id'] for e in invalid_items]\n\tacts_id = list(set(act_new + act_invalid))\n\n\t# Iterate all preprocessing methods related to feature $D_{*j}$:\n\tfor i in acts_id:\n\t\t# Get the activity identifier:\n\t\tact_identifier = i\n\t\t# Get the related invalidated entities id:\n\t\tinvalidated_entities = [e['invalidated_entities'] for e in invalid_items if e['_id'] == act_identifier]\n\n\t\tif invalidated_entities:\n\t\t\tinvalidated_entities = invalidated_entities[0]\n\t\t\t# Get the values of the invalidated entities\n\t\t\tinvalid_values = entities.aggregate([ \\\n\t\t\t\t{'$match': {'identifier':{'$in':invalidated_entities}}}, \\\n\t\t\t\t{'$project': {'invalid_values': '$attributes.value', '_id': 0}} \\\n\t\t\t])\n\t\t\tinvalid_values = list(invalid_values)\n\t\t\tinvalid_values = [d['invalid_values'] for d in invalid_values]\n\n\t\t# Get the related new entities id:\n\t\tnew_entities = [e['new_entities'] for e in new_items if e['_id'] == act_identifier]\n\n\t\tif new_entities:\n\t\t\tnew_entities = new_entities[0]\n\t\t\t# Get the values of the new entities\n\t\t\tnew_values = entities.aggregate([ \\\n\t\t\t\t{'$match': {'identifier':{'$in':new_entities}}}, \\\n\t\t\t\t{'$project': {'new_values': '$attributes.value', '_id': 0}} \\\n\t\t\t])\n\t\t\tnew_values = list(new_values)\n\t\t\tnew_values = [d['new_values'] for d in new_values]\n\n\t\tprint('------------------------------------------')\n\t\tprint('Activity identifier: ' + str(act_identifier))\n\t\tprint('Number of invalidated items: ' + str(len(invalidated_entities)))\n\t\tprint('Number of new items: ' + str(len(new_entities)))\n\t\tif invalidated_entities:\n\t\t\tprint('Max invalid_values ' + max(invalid_values))\n\t\tif new_entities:\n\t\t\tprint('Max new_values ' + max(new_values))\n\n\ndef get_items(feature_name):\n\t# Get activities relatetd to feature_name\n\tacts = activities.find({'attributes.features_name': {'$regex': '.*' + feature_name + '*.'}}, {'identifier': 1, '_id': 0}).distinct('identifier')\n\n\t# Get invalidated items for all preprocessing methods related to feature $D_{*j}$:\n\tinvalid_items = relations.aggregate([ \\\n\t\t{'$match': {'prov:relation_type': 'wasInvalidatedBy', 'prov:activity': {'$in': acts}}}, \\\n\t\t{'$group': {'_id': '$prov:activity', 'invalidated_entities': {'$addToSet': '$prov:entity'}}} \n\t])\n\n\t# Get new items for all preprocessing methods related to feature $D_{*j}$:\n\tnew_items = relations.aggregate([ \\\n\t\t{'$match': {'prov:relation_type': 'wasGeneratedBy', 'prov:activity': {'$in': acts}}}, \\\n\t\t{'$group': {'_id': '$prov:activity', 'new_entities': {'$addToSet': '$prov:entity'}}} \n\t])\n\n\treturn (invalid_items, new_items)\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) == 2 
:\n\t\tdbname = sys.argv[1]\n\n\t\t# Connect with MongoClient on the default host and port:\n\t\tclient = pymongo.MongoClient('localhost', 27017)\n\n\t\t# Getting a Database:\n\t\tdb = client[dbname]\n\n\t\t# Get entities, activities and relations mongodb collection:\n\t\tentities = db.entities\n\t\tactivities = db.activities\n\t\trelations = db.relations\n\n\t\tfeature_name = get_random_feature()\n\n\t\tprint('Feature Spread of: ' + feature_name)\n\n\t\ttime1 = time.time()\n\t\tinvalid_items, new_items = get_items(feature_name)\n\t\tinvalid_items = list(invalid_items)\n\t\tnew_items = list(new_items)\n\n\t\tif ((invalid_items) or (new_items)):\n\t\t\tget_dataset_spread(invalid_items, new_items)\n\t\telse:\n\t\t\tprint('No operation on '+ feature_name )\n\n\t\ttime2 = time.time()\n\n\t\ttext = '{:s} function took {:.3f} sec.'.format('Feature Spread', (time2-time1))\n\t\tprint(text)\n\t\t\n\t\t# Close Mongodb connection:\n\t\tclient.close()\n\telse:\n\t\tprint('[ERROR] usage: feture_spread.py ')","repo_name":"GiuliaSim/DataProvenance","sub_path":"queries/feature_spread.py","file_name":"feature_spread.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1549062369","text":"\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nimport os\nimport utm\nimport rasterio\nfrom tqdm import tqdm\n#from xml.etree import ElementTree as et\nimport xmltodict\n\n##\ndef cloud_masking(image,cld):\n cloud_mask = cld > 30\n band_mean = image.mean()\n image[cloud_mask] = band_mean\n return image\n\n##\ndef load_file(fp):\n \"\"\"Takes a PosixPath object or string filepath\n and returns np array\"\"\"\n\n return np.array(Image.open(fp.__str__()))\n\ndef paths (name): \n\n fold_band_10 = glob(name+\"/GRANULE/*/IMG_DATA/R10m\")[0]\n fold_band_20 = glob(name+\"/GRANULE/*/IMG_DATA/R20m\")[0]\n fold_band_60 = glob(name+\"/GRANULE/*/IMG_DATA/R60m\")[0]\n path = name+\"/GRANULE/*/IMG_DATA/R10m\"+\"/*.jp2\"\n x = glob(path)\n lists = x[0].split(\"/\")[-1].split(\"_\")\n fixe = lists[0]+'_'+lists[1]\n\n band_10 = ['B02', 'B03', 'B04','B08']\n band_20 = ['B05', 'B06', 'B07','B8A','B11', 'B12']\n band_60 = ['B01','B09']\n images_name_10m = [fixe+\"_\"+band+\"_10m.jp2\" for band in band_10 ]\n images_name_20m = [fixe+\"_\"+band+\"_20m.jp2\" for band in band_20 ]\n images_name_60m = [fixe+\"_\"+band+\"_60m.jp2\" for band in band_60 ]\n #\n bandes_path_10 = [os.path.join(fold_band_10,img) for img in images_name_10m]\n bandes_path_20 = [os.path.join(fold_band_20,img) for img in images_name_20m]\n bandes_path_60 = [os.path.join(fold_band_60,img) for img in images_name_60m]\n #\n tile_path = name+\"/INSPIRE.xml\"\n path_cld_20 = glob(name+\"/GRANULE/*/QI_DATA/MSK_CLDPRB_20m.jp2\")[0]\n path_cld_60 = glob(name+\"/GRANULE/*/QI_DATA/MSK_CLDPRB_60m.jp2\")[0]\n\n return bandes_path_10,bandes_path_20,bandes_path_60,tile_path,path_cld_20,path_cld_60\n\n##\ndef coords_to_pixels(ref, utm, m=10):\n \"\"\" Convert UTM coordinates to pixel coordinates\"\"\"\n\n x = int((utm[0] - ref[0])/m)\n y = int((ref[1] - utm[1])/m)\n\n return x, y\n\n##\ndef extract_sub_image(bandes_path,tile_path,area,resolution=10, d= 3, cld_path = None):\n \n xml_file=open(tile_path,\"r\")\n xml_string=xml_file.read()\n python_dict=xmltodict.parse(xml_string)\n tile_coordonnates = python_dict[\"gmd:MD_Metadata\"][\"gmd:identificationInfo\"][\"gmd:MD_DataIdentification\"][\"gmd:abstract\"][\"gco:CharacterString\"].split()\n\n # S2 tile coordonnates\n lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])\n tile_coordonnate = [lat,lon]\n\n refx, refy, _, _ = utm.from_latlon(tile_coordonnate[0], tile_coordonnate[1])\n ax,ay,_,_ = utm.from_latlon(area[1],area[0]) # lat,lon\n \n ref = [refx, refy]\n utm_cord = [ax,ay]\n x,y = coords_to_pixels(ref,utm_cord,resolution)\n \n images = []\n # sub_image_extraction\n for band_path in tqdm(bandes_path, total=len(bandes_path)):\n image = load_file(band_path).astype(np.float32)\n if resolution==60:\n sub_image = image[y,x]\n images.append(sub_image)\n \n else:\n sub_image = image[y-d:y+d,x-d:x+d]\n images.append(sub_image)\n\n images = np.array(images)\n \n\n # verify if the study are is cloudy\n if cld_path is not None:\n cld_mask = load_file(cld_path).astype(np.float32)\n cld = cld_mask[y-d:y+d,x-d:x+d]\n # cloud removing\n images = cloud_masking(images,cld)\n\n if resolution==60:\n return images\n else:\n return images.mean((1,2))\n \n\ndef ndvi(area, tile_name):\n \"\"\"\n polygone: (lon,lat) format\n tile_name: name of tile with the most low cloud coverage\n \"\"\"\n #Extract tile coordonnates (lat,long)\n tile_path = tile_name+\"/INSPIRE.xml\"\n xml_file=open(tile_path,\"r\")\n 
xml_string=xml_file.read()\n python_dict=xmltodict.parse(xml_string)\n tile_coordonnates = python_dict[\"gmd:MD_Metadata\"][\"gmd:identificationInfo\"][\"gmd:MD_DataIdentification\"][\"gmd:abstract\"][\"gco:CharacterString\"].split()\n\n # S2 tile coordonnates\n lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])\n tile_coordonnate = [lat,lon]\n\n refx, refy, _, _ = utm.from_latlon(tile_coordonnate[0], tile_coordonnate[1])\n ax,ay,_,_ = utm.from_latlon(area[1],area[0]) # lat,lon\n \n ref = [refx, refy]\n utm_cord = [ax,ay]\n x,y = coords_to_pixels(ref,utm_cord)\n\n # read images\n path_4 = tile_name+\"/GRANULE/*/IMG_DATA/R10m/*_B04_10m.jp2\"\n path_8 = tile_name+\"/GRANULE/*/IMG_DATA/R10m/*_B08_10m.jp2\"\n red_object = rasterio.open(glob(path_4)[0])\n nir_object = rasterio.open(glob(path_8)[0])\n red = red_object.read()\n nir = nir_object.read()\n red,nir = red[0],nir[0]\n # extract area and remove unsigne\n sub_red = red[y-3:y+3,x-3:x+3].astype(np.float16)\n sub_nir = nir[y-3:y+3,x-3:x+3].astype(np.float16)\n \n # NDVI\n ndvi_image = ((sub_nir - sub_red)/(sub_nir+sub_red))\n ndvi_mean_value = ndvi_image.mean()\n \n return ndvi_mean_value\n ","repo_name":"data354/Biomass","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13364058719","text":"from abc import ABC\nfrom typing import Callable, Dict, Any, Awaitable\n\nfrom aiogram import Bot, Dispatcher, BaseMiddleware\n\nfrom aiogram.types import Message, CallbackQuery\n\n\nclass UtilsMiddleware(BaseMiddleware, ABC):\n\n def __init__(self, bot: Bot, dp: Dispatcher):\n super().__init__()\n self.bot = bot\n self.dp = dp\n\n async def on_pre_process_message(self, message: Message, data: dict):\n data[\"bot\"] = self.bot\n data['dp'] = self.dp\n\n async def __call__(\n self,\n handler: Callable[[Message, Dict[str, Any]], Awaitable[Any]],\n event: Message,\n data: Dict[str, Any]\n ):\n data['dp'] = self.dp\n return await handler(event, data)\n\n\nclass WorkersCallbackMiddleware(BaseMiddleware, ABC):\n\n async def __call__(\n self,\n handler: Callable[[CallbackQuery, Dict[str, Any]], Awaitable[Any]],\n event: CallbackQuery,\n data: Dict[str, Any]\n ):\n data[\"worker_id\"] = event.data.split('_')[-1]\n return await handler(event, data)\n","repo_name":"devtolmachev/Better","sub_path":"utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"73181861315","text":"def can_be_segmented(s, dictionary):\n n = len(s)\n dp = [False] * (n + 1)\n dp[0] = True\n\n for i in range(1, n + 1):\n for word in dictionary:\n if dp[i - len(word)] and s[i - len(word):i] == word:\n dp[i] = True\n\n return dp[n]\n\n# Input handling\ns = input().strip()\nn = int(input())\ndictionary = [input().strip() for _ in range(n)]\n\n# Check if the string can be segmented using the dictionary\nresult = can_be_segmented(s, dictionary)\n\n# output\nprint(\"true\" if result else \"false\")\n","repo_name":"Rohit177/100-day-of-coding-sprint-Unstop.com","sub_path":"Day-3/Code-2 Mocha’s Alien dictionary/Mochas_alian_solution.py","file_name":"Mochas_alian_solution.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30085365153","text":"from django.contrib import messages\nfrom django.shortcuts import redirect, render\nimport bcrypt\nfrom .decorators import login_required\nfrom .models import *\n\n@login_required\ndef index(request):\n current_user = request.session['user']['id']\n owner = Trip.objects.all().filter(manager=current_user)\n travelers = Trip.objects.all().filter(traveler=current_user).exclude(manager=current_user)\n trips = Trip.objects.all().exclude(traveler=current_user).exclude(manager=current_user)\n context = {\n \"owners\" : owner,\n \"travelers\": travelers,\n \"trips\" : trips\n }\n return render(request, 'travel_section.html', context)\n\n@login_required\ndef add(request):\n\n return render(request, 'add.html')\n\ndef add_trip(request):\n try:\n datetime.datetime.strptime(request.POST['start_date'], '%Y-%m-%d')\n except:\n messages.error(request, \"ingrese una fecha\")\n return redirect(\"/add\")\n try:\n datetime.datetime.strptime(request.POST['end_date'], '%Y-%m-%d')\n except:\n messages.error(request, \"ingrese una fecha\")\n return redirect(\"/add\")\n errors = Trip.objects.validador_basico(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(\"/add\")\n \n current_user = request.session['user']['id']\n in_manager = User.objects.get(id=current_user)\n # in_traveler = User.objects.get(id=current_user)\n in_destination=request.POST['destination']\n in_description=request.POST['description']\n in_start_date=request.POST['start_date']\n in_end_date=request.POST['end_date']\n new_trip = Trip.objects.create(manager=in_manager,\n # traveler=in_traveler,\n destination=in_destination,\n description=in_description,\n start_date=in_start_date,\n end_date=in_end_date)\n \n new_trip.traveler.add(in_manager)\n\n return redirect(\"/travels\")\n\n# author.books.remove(book)\n# INSTANCIA_TRIP . VARIABLE OBJETIVO . COMANDO ( INSTANCIA_USER )\n\ndef cancel_trip(request, trip_id):\n current_user_id = request.session['user']['id']\n current_user = User.objects.get(id=current_user_id)\n trip = Trip.objects.get(id=int(trip_id))\n trip.traveler.remove(current_user)\n return redirect(\"/travels\")\n\ndef delete_trip(request, trip_id):\n Trip.objects.filter(id=int(trip_id)).delete()\n return redirect(\"/travels\")\n\ndef join_trip(request, trip_id):\n current_user_id = request.session['user']['id']\n current_user = User.objects.get(id=current_user_id)\n trip = Trip.objects.get(id=int(trip_id))\n trip.traveler.add(current_user)\n return redirect(\"/travels\")\n\ndef view_trip(request, trip_id):\n trip = Trip.objects.get(id=int(trip_id))\n manager = User.objects.filter(managers = trip)\n travelers = User.objects.filter(travelers = trip).exclude(managers = trip)\n\n context = {\n \"trip\":trip,\n \"manager\":manager,\n \"travelers\":travelers\n }\n\n return render(request, 'view.html', context)","repo_name":"RLAlfaro/travel_buddy","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"777497607","text":"''' \n個人股票、外匯清單\n'''\nfrom pymongo import MongoClient\nimport urllib.parse\nimport datetime\nimport stockprice\nimport EXRate\nfrom EXRate import *\ncurrency_list = { \n \"USD\" : \"美元\",\n \"JPY\": \"日圓\",\n \"HKD\" :\"港幣\",\n \"GBP\": \"英鎊\",\n \"AUD\": \"澳幣\",\n \"CAD\" : \"加拿大幣\",\n \"CHF\" : \"瑞士法郎\", \n \"SGD\" : \"新加坡幣\",\n \"ZAR\" : \"南非幣\",\n \"SEK\" : \"瑞典幣\",\n \"NZD\" : \"紐元\", \n \"THB\" : \"泰幣\", \n \"PHP\" : \"菲國比索\", \n \"IDR\" : \"印尼幣\", \n \"KRW\" : \"韓元\", \n \"MYR\" : \"馬來幣\", \n \"VND\" : \"越南盾\", \n \"CNY\" : \"人民幣\",\n }\n \n# Authentication Database認證資料庫\nstockDB='mystock'\ncurrencyDB = 'mycurrency'\n# DB connection\ndef constructor_stock(): \n client = MongoClient(\"URL\")\n db = client[stockDB]\n return db\n\ndef constructor_currency():\n client = MongoClient(\"URL\")\n db = client[currencyDB]\n return db\n\n# ---------------------------- 新增 修改 刪除前 先確認股票清單中是否有該檔股票--------------------------\ndef query_stock(user_name, stockNumber):\n db = constructor_stock()\n collect = db[user_name]\n dataList = collect.find_one({\"favorite_stock\": stockNumber})\n if dataList == None:\n print(\"none\")\n return dataList\n\n#----------------------------更新暫存的股票名稱--------------------------\ndef update_my_stock(user_name, stockNumber, condition , target_price):\n db=constructor_stock()\n collect = db[user_name]\n collect.update_many({\"favorite_stock\": stockNumber }, {'$set': {'condition':condition , \"price\": target_price}})\n content = f\"股票{stockNumber}更新成功\"\n return content\n\n# ---------------- 秀出使用者的股票清單 ----------------\ndef show_my_stock(userID, user_name, msg):\n db = constructor_stock()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的股票清單為空,請透過指令新增股票至清單中\"\n content = \"\"\n for i in range(len(dataList)):\n content += stockprice.getprice(dataList[i][\"favorite_stock\"], msg)\n return content\n# ----------- 新增使用者的股票 -------------\ndef write_my_stock(userID, user_name, stockNumber, condition , target_price):\n db=constructor_stock()\n collect = db[user_name]\n is_exit = collect.find_one({\"favorite_stock\": stockNumber})\n if is_exit != None :\n content = update_my_stock(user_name, stockNumber, condition , target_price)\n return content\n else:\n collect.insert_one({\n \"userID\": userID,\n \"favorite_stock\": stockNumber,\n \"condition\" : condition,\n \"price\" : target_price,\n \"tag\": \"stock\",\n \"date_info\": datetime.datetime.now()\n })\n return f\"{stockNumber}已新增至您的股票清單\"\n \n# ---------------- 刪除使用者特定的股票 ----------------\ndef delete_my_stock(user_name, stockNumber):\n db = constructor_stock()\n collect = db[user_name]\n collect.delete_one({'favorite_stock': stockNumber})\n return stockNumber + \"刪除成功\"\n\n# ---------------- 刪除使用者股票清單內所有的股票 ----------------\ndef delete_my_allstock(user_name, userID):\n db = constructor_stock()\n collect = db[user_name]\n collect.delete_many({'userID': userID})\n return \"全部股票刪除成功\"\n\n# ---------------- 秀出使用者的股票條件 ----------------\ndef show_stock_setting(user_name, userID):\n db = constructor_stock()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的股票清單為空,請透過指令新增股票至清單中\"\n content = \"您清單中的選股條件為: \\n\"\n for i in range(len(dataList)):\n content += f'{dataList[i][\"favorite_stock\"]} {dataList[i][\"condition\"]} {dataList[i][\"price\"]}\\n'\n return content\n\n#---------------------------- 更新匯率清單的匯率 --------------------------\ndef update_my_currency(user_name, currency, 
condition , target_price):\n db=constructor_currency()\n collect = db[user_name]\n collect.update_many({\"favorite_currency\": currency }, {'$set': {'condition':condition , \"price\": target_price}})\n return f\"{currency_list[currency]}更新成功\"\n\n#---------------------------- 新增匯率至匯率清單 --------------------------\ndef write_my_currency(userID , user_name, currency, condition, target_price):\n db = constructor_currency()\n collect = db[user_name]\n is_exit = collect.find_one({\"favorite_currency\": currency})\n content = \"\"\n if is_exit != None : return update_my_currency(user_name, currency, condition , target_price)\n else:\n collect.insert_one({\n \"userID\": userID,\n \"favorite_currency\": currency,\n \"condition\" : condition,\n \"price\" : target_price,\n \"tag\": \"currency\",\n \"date_info\": datetime.datetime.now()\n })\n return f\"{currency_list[currency]}已新增至您的外幣清單\"\n\n#---------------------------- 查詢匯率清單的匯率(文字) --------------------------\ndef show_my_currency(userID, user_name):\n db = constructor_currency()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的外幣清單為空,請透過指令新增外幣至清單中\"\n content = \"\"\n for i in range(len(dataList)):\n content += EXRate.showCurrency(dataList[i][\"favorite_currency\"]) \n return content\n\n\n# ---------------- 刪除使用者清單特定的匯率 ----------------\ndef delete_my_currency(user_name, currency):\n db = constructor_currency()\n collect = db[user_name]\n collect.delete_one({'favorite_currency': currency})\n return currency_list[currency] + \"刪除成功\"\n\n\n#---------------------------- 刪除匯率清單全部匯率 --------------------------\ndef delete_my_allcurrency(user_name, userID):\n db = constructor_currency()\n collect = db[user_name]\n collect.delete_many({'userID': userID})\n return \"外幣清單已清空\"\n\n# ---------------- 查詢使用者的匯率設定條件 ----------------\ndef show_currency_setting(user_name, userID):\n db = constructor_currency()\n collect = db[user_name]\n dataList = list(collect.find({\"userID\": userID}))\n if dataList == []: return \"您的外幣清單為空,請透過指令新增外幣至清單中\"\n content = \"您清單中外幣篩選條件為: \\n\"\n for i in range(len(dataList)):\n content += f'{dataList[i][\"favorite_currency\"]} {dataList[i][\"condition\"]} {dataList[i][\"price\"]}\\n'\n return content\n","repo_name":"ChenTsungYu/stock_linebot_public","sub_path":"mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":6880,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"61"}
+{"seq_id":"35369598465","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport json\nimport os\nfrom datetime import date\nfrom scipy.stats import linregress\nimport yaml\nfrom momentum_data import cfg\n\nDIR = os.path.dirname(os.path.realpath(__file__))\n\npd.set_option('display.max_rows', None)\npd.set_option('display.width', None)\npd.set_option('display.max_columns', None)\n\ntry:\n with open('config.yaml', 'r') as stream:\n config = yaml.safe_load(stream)\nexcept FileNotFoundError:\n config = None\nexcept yaml.YAMLError as exc:\n print(exc)\n\nPRICE_DATA = os.path.join(DIR, \"data\", \"price_history.json\")\nACCOUNT_VALUE = cfg(\"CASH\")\nRISK_FACTOR_CFG = cfg(\"RISK_FACTOR\")\nRISK_FACTOR = RISK_FACTOR_CFG or 0.002\nMAX_STOCKS = cfg(\"STOCKS_COUNT_OUTPUT\")\nSLOPE_DAYS = cfg(\"MOMENTUM_CALCULATION_PAST_DAYS\")\nPOS_COUNT_TARGET = cfg(\"POSITIONS_COUNT_TARGET\")\nMAX_GAP = cfg(\"EXCLUDE_MAX_GAP_PCT\")\nEXCLUDE_MA_CROSSES = cfg(\"EXCLUDE_ALL_MA_CROSSES\")\n\nTITLE_RANK = \"Rank\"\nTITLE_TICKER = \"Ticker\"\nTITLE_SECTOR = \"Sector\"\nTITLE_UNIVERSE = \"Universe\"\nTITLE_PERCENTILE = \"Percentile\"\nTITLE_MOMENTUM = \"Momentum (%)\"\nTITLE_RISK = \"ATR20d\"\nTITLE_PRICE = \"Price\"\nTITLE_SHARES = \"Shares\"\nTITLE_POS_SIZE = \"Position ($)\"\nTITLE_SUM = \"Sum ($)\"\n\nif not os.path.exists('output'):\n os.makedirs('output')\n\ndef read_json(json_file):\n with open(json_file, \"r\") as fp:\n return json.load(fp)\n\ndef momentum(closes):\n \"\"\"Calculates slope of exp. regression normalized by rsquared\"\"\"\n returns = np.log(closes)\n indices = np.arange(len(returns))\n slope, _, r, _, _ = linregress(indices, returns)\n # return ((1 + slope) ** 253) * (r**2)\n return (((np.exp(slope) ** 252) - 1) * 100) * (r**2)\n\ndef atr_20(candles):\n \"\"\"Calculates last 20d ATR\"\"\"\n daily_atrs = []\n for idx, candle in enumerate(candles):\n high = candle[\"high\"]\n low = candle[\"low\"]\n prev_close = 0\n if idx > 0:\n prev_close = candles[idx - 1][\"close\"]\n daily_atr = max(high-low, np.abs(high - prev_close), np.abs(low - prev_close))\n daily_atrs.append(daily_atr)\n return pd.Series(daily_atrs).rolling(20).mean().tail(1).item()\n\ndef calc_stocks_amount(account_value, risk_factor, risk_input):\n return (np.floor(account_value * risk_factor / risk_input)).astype(int)\n\ndef calc_pos_size(amount, price):\n return np.round(amount * price, 2)\n\ndef calc_sums(account_value, pos_size):\n sums = []\n sum = 0\n stocks_count = 0\n for position in list(pos_size):\n sum = sum + position\n sums.append(sum)\n if sum < account_value:\n stocks_count = stocks_count + 1\n return (sums, stocks_count)\n\ndef positions():\n \"\"\"Returns a dataframe doubly sorted by momentum factor, with atr and position size\"\"\"\n json = read_json(PRICE_DATA)\n momentums = {}\n ranks = []\n for ticker in json:\n try:\n closes = list(map(lambda candle: candle[\"close\"], json[ticker][\"candles\"]))\n if closes and len(closes) >= 250:\n closes_series = pd.Series(closes)\n slope_series = closes_series.tail(SLOPE_DAYS[0])\n mas = closes_series.rolling(100).mean().tail(SLOPE_DAYS[0])\n ma_is_crossed = False\n if (EXCLUDE_MA_CROSSES):\n ma_crosses = slope_series < mas\n ma_crosses = ma_crosses.where(ma_crosses == True).dropna()\n ma_is_crossed = ma_crosses.size > 0\n # calculate gaps of the last 90 days\n diffs = np.abs(slope_series.pct_change().diff()).dropna()\n gaps = diffs[diffs > MAX_GAP / 100.0]\n ma = mas.tail(1).item()\n if ma > closes[-1] or ma_is_crossed:\n print(\"%s was below it's 100d moving 
average.\" % ticker)\n elif len(gaps):\n print(f'{ticker} has a gap > {MAX_GAP}%')\n else:\n ranks.append(len(ranks)+1)\n for idx, slope_days in enumerate(SLOPE_DAYS):\n if not slope_days in momentums:\n momentums[slope_days] = []\n mmntm = momentum(pd.Series(closes[-slope_days:]))\n momentums[slope_days].append((0, ticker, json[ticker][\"sector\"], json[ticker][\"universe\"], mmntm, atr_20(json[ticker][\"candles\"]), closes[-1]))\n except KeyError:\n print(f'Ticker {ticker} has corrupted data.')\n slope_std = SLOPE_DAYS[0]\n dfs = []\n for slope_days in SLOPE_DAYS:\n slope_suffix = f'_{slope_days}' if slope_days != slope_std else ''\n df = pd.DataFrame(momentums[slope_days], columns=[TITLE_RANK, TITLE_TICKER, TITLE_SECTOR, TITLE_UNIVERSE, TITLE_MOMENTUM, TITLE_RISK, TITLE_PRICE])\n df = df.sort_values(([TITLE_MOMENTUM]), ascending=False)\n df[TITLE_RANK] = ranks\n # df[TITLE_PERCENTILE] = pd.qcut(df[TITLE_MOMENTUM], 100, labels=False)\n df = df.head(MAX_STOCKS)\n risk_factor = RISK_FACTOR\n calc_runs = 2\n for run in range(1,calc_runs+1,1):\n # recalculate for positions target\n if run > 1 and not RISK_FACTOR_CFG and POS_COUNT_TARGET and (stocks_count < POS_COUNT_TARGET or stocks_count - POS_COUNT_TARGET > 1):\n risk_factor = RISK_FACTOR * (stocks_count / POS_COUNT_TARGET)\n df[TITLE_SHARES] = calc_stocks_amount(ACCOUNT_VALUE, risk_factor, df[TITLE_RISK])\n df[TITLE_POS_SIZE] = calc_pos_size(df[TITLE_SHARES], df[TITLE_PRICE])\n (sums, stocks_count) = calc_sums(ACCOUNT_VALUE, df[TITLE_POS_SIZE])\n df[TITLE_SUM] = sums\n\n df.to_csv(os.path.join(DIR, \"output\", f'mmtm_posis{slope_suffix}.csv'), index = False)\n\n watchlist = open(os.path.join(DIR, \"output\", f'Momentum{slope_suffix}.txt'), \"w\")\n first_10_pf = \"\"\n tv_ticker_count = 0\n for index, row in df.iterrows():\n plus_sign = \"\" if tv_ticker_count == 0 else \"+\"\n # TradingView only supports combining 10 Tickers :(((\n if row[TITLE_POS_SIZE] > 0 and row[TITLE_SUM] <= ACCOUNT_VALUE and tv_ticker_count < 10:\n tv_ticker_count = tv_ticker_count + 1\n first_10_pf = f'{first_10_pf}{plus_sign}{row[TITLE_SHARES]}*{row[TITLE_TICKER]}'\n # first_10_combined = f'{first_10_combined})/{tv_ticker_count}'\n watchlist_stocks = ','.join(df.head(MAX_STOCKS)[TITLE_TICKER])\n watchlist.write(f'{first_10_pf},{watchlist_stocks}')\n watchlist.close()\n\n dfs.append(df)\n\n return dfs\n\n\ndef main():\n posis = positions()\n print(posis[0])\n print(\"***\\nYour 'mmtm_posis.csv' is in the output folder.\\n***\")\n if cfg(\"EXIT_WAIT_FOR_ENTER\"):\n input(\"Press Enter key to exit...\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"skyte/momentum","sub_path":"momentum_posis.py","file_name":"momentum_posis.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
+{"seq_id":"31590777921","text":"n = int(input())\na = [list(map(int, input().split())) for _ in range(n)]\n# 최대값을 구하는 largest 변수 / 먼저 가장 작은 값으로 설정\nlargest = -2147000000\nfor i in range(n):\n # 행의 합은 sum1, 열의 합은 sum2로 설정\n sum1 = sum2 = 0\n for j in range(n):\n # 행의 합은 a 리스트 i행의 0부터 마지막까지의 j열을 다 더한 값\n sum1 += a[i][j]\n # 열의 합은 a 리스트 i열의 0부터 마지막까지의 j행을 다 더한 값\n sum2 += a[j][i]\n # 해당 if문을 통해 행의 합과 열의 합에서 가장 큰 수를 계산\n if sum1 > largest:\n largest = sum1\n if sum2 > largest:\n largest = sum2\n\n# 이제 대각선의 합 구하기\nsum1 = sum2 = 0\nfor i in range(n):\n # 격자판 왼쪽 끝부터 오른쪽 아래까지의 대각선 합\n sum1 += a[i][i]\n # 격자판 오른쪽 끝부터 왼쪽 아래까지의 대각선 합\n sum2 += a[i][n-i-1]\n# 해당 if문을 통해 위에서 계산한 largest와 대각선 2개의 합에서 가장 큰 수를 계산\nif sum1 > largest:\n largest = sum1\nif sum2 > largest:\n largest = sum2\n\nprint(largest)","repo_name":"tkdqor/coding_test_practice","sub_path":"알고리즘 강의/탐색과 시뮬레이션/격자판 최대합.py","file_name":"격자판 최대합.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"17007459273","text":"import streamlit as st\r\nimport pandas as pd\r\nimport altair as alt\r\n\r\nst.title(\"Gender equality\")\r\n\r\n\r\nst.header(\"Gender equality\")\r\n\r\ndf = pd.read_csv(\"genderTransformed.csv\", sep=',', header=0)\r\n\r\n\r\n# a lot of missing data so will start from 1990\r\nis_selected = df['Year']>=1990\r\ndf = df[is_selected]\r\n\r\n \r\ndf = df.sort_values(by=['Country Name'])\r\ncountries = df['Country Name'].unique()\r\n\r\n\r\ncountry3 = st.selectbox('Choose country',countries)\r\nindicators = ['Gender Gap Labor force participation % (15+age)','Gender Gap Part time employment %','Gender Gap Self-employed %','Gender Gap Unemployment %','Gender Gap Vulnerable employment %','Gender Gap Wage and salaried workers %']\r\nindicator = st.selectbox('Choose Indicator',indicators)\r\n\r\n\r\n\r\nif st.button(\"Show chart 1\"):\r\n is_selected = df['Country Name']==country3\r\n df_selected = df[is_selected]\r\n df1 = df_selected[['Year', indicator]]\r\n \r\n y = indicator.split(\"%\")\r\n \r\n # lets try to deceive by increasing the domain of Y axis, so the gap looks smaller and changes smoother throught the years\r\n chart3_deceive = alt.Chart(df1).mark_area().encode(\r\n alt.X('Year:N'),\r\n\talt.Y(indicator+\":Q\",scale=alt.Scale(domain=(-100, 100))),\r\n ).properties(\r\n title={\r\n \"text\": [indicator + \" for \"+ country3]\r\n }\r\n )\r\n chart3_deceive.encoding.y.title = y[0]\r\n st.altair_chart(chart3_deceive)\r\n\r\n\r\n\r\ncountry = st.selectbox('Country',countries)\r\nif st.button(\"Show chart 3\"):\r\n is_selected = df['Country Name']==country\r\n df_selected = df[is_selected]\r\n\r\n base = alt.Chart(df_selected.reset_index()).encode(x='Year').properties(\r\n title='Gender Equality indicators for '+country\r\n )\r\n \r\n chart = alt.layer(\r\n base.mark_line(color='red').encode(y='Gender Gap Part time employment %'),\r\n base.mark_line(color='orange').encode(y='Gender Gap Self-employed %'),\r\n base.mark_line(color='green').encode(y='Gender Gap Unemployment %'),\r\n base.mark_line(color='purple').encode(y='Gender Gap Vulnerable employment %'),\r\n base.mark_line(color='pink').encode(y='Gender Gap Wage and salaried workers %')\r\n )\r\n chart.layer[0].encoding.y.title = 'Gender Gap'\r\n st.write(chart)\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"LediaIsaj/VisualAnalytics_gender_equality","sub_path":"assignment4_deceive.py","file_name":"assignment4_deceive.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23599048851","text":"#!/usr/bin/env python\n\nimport sys, math\nimport fractions\nfrom itertools import repeat, count, cycle, ifilter, ifilterfalse, imap, starmap, tee, izip, product, combinations, permutations\nfrom collections import defaultdict\nfrom operator import itemgetter\n\n\ndef mapInstance( foo, istream ):\n N = int( istream.readline() )\n idata = []\n for i in xrange(N):\n idata.append( map( int, istream.readline().strip() )[::-1] )\n return foo( idata )\n \ndef mapInput( foo, preproc = None, istream = sys.stdin, ostream = sys.stdout ):\n N = map( int, istream.readline().split() )[0]\n if preproc:\n for i in xrange(D):\n preproc( istream.readline().split() )\n odata = starmap( mapInstance, repeat( ( foo, istream ), N ) )\n for i, d in enumerate( odata ):\n print >>sys.stderr, \"Case #%d\" % ( i+1 )\n print >>ostream, \"Case #%d: %s\" % ( i+1, d )\n \nclass showfunction:\n def __init__( self, foo ):\n self.foo = foo\n \n def __call__( self, *args ):\n result = self.foo( *args )\n print >>sys.stderr, args, result\n return result\n\nclass cachedfunction:\n def __init__( self, foo ):\n self.foo = foo\n self.cache = {}\n \n def __call__( self, *args ):\n if args in self.cache:\n return self.cache[args]\n else:\n result = self.cache[args] = self.foo( *args )\n return result\n\ndef solve( idata ):\n idata = map( lambda x: x+[1], idata )\n rows = map( lambda x: x.index(1), idata )\n \n #@cachedfunction\n def rek(n, visited):\n if n == 0:\n return 0\n \n swap = -1\n mini = 10000\n for i in range(len(rows)):\n if i in visited: continue\n swap += 1\n if rows[i] < n: continue\n this = swap + rek(n-1, visited | set([i]))\n if this < mini:\n mini = this\n return mini\n \n return str( rek(len(rows)-1, set()) )\n \ndef main( args ):\n mapInput( solve )\n\nif __name__ == \"__main__\":\n main( sys.argv )\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_46/104.py","file_name":"104.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36840947764","text":"import sys\nimport os\nimport numpy as np\nimport MeshTools.io.petrel as petrel\nimport vtkwriters as vtkw\n\nfilename = sys.argv[1]\nmesh, perm = petrel.import_eclipse_grid(filename)\noffsets, cellsnodes = mesh.cells_nodes_as_COC()\nvtkw.write_vtu(\n vtkw.vtu_doc_from_COC(\n mesh.vertices_array(),\n np.array(offsets[1:], copy=False), # no first zero offset for vtk\n np.array(cellsnodes, copy=False),\n mesh.cells_vtk_ids(),\n celldata={\n name: np.asarray(values) for name, values in zip([\"kx\", \"ky\", \"kz\"], perm)\n },\n ),\n os.path.splitext(filename)[0] + \".vtu\",\n)\n","repo_name":"BRGM/MeshTools","sub_path":"tests/experimental/petrel/petrel2vtu.py","file_name":"petrel2vtu.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12970431148","text":"import subprocess\nimport time\nimport os\nfrom datetime import datetime\n\nlast_trimmed = 0\n\ndef trimmer():\n global last_trimmed\n nodine_log_path = \"/home/inery-genesis/inery.setup/master.node/blockchain/nodine.log\"\n log_storage = \"/log_storage\"\n time_created = time.time()\n oldest_file = \"\"\n return_nodine_size = subprocess.Popen([\"sudo\", \"du\", nodine_log_path], stdout=subprocess.PIPE)\n size_text = return_nodine_size.communicate()[0].decode()\n size_f = size_text.split(\"\\t\")[0]\n subprocess.run([\"echo\", \"Size of file \", str(size_f), \"KB\\n\", \"Last trimmed \", str(last_trimmed), \" / \", datetime.fromtimestamp(last_trimmed).strftime(\"%A, %B %d, %Y %I:%M:%S\")])\n if (float(size_f) > 210000000) or ((time.time() - last_trimmed) > 432000):\n for root, dirs, files in os.walk(log_storage):\n if len(files) >= 5:\n for file in files:\n if file.endswith(\".zip\"):\n if os.path.getctime(os.path.join(root, file)) < time_created:\n oldest_file = os.path.join(root, file)\n time_created = os.path.getctime(os.path.join(root, file))\n if oldest_file != \"\":\n subprocess.run([\"sudo\", \"rm\", \"-rf\", oldest_file])\n oldest_file = \"\"\n subprocess.run([\"sudo\", \"zip\", \"-r\", log_storage + \"/nodinelog\" + str(time.time()) + \".zip\", nodine_log_path])\n subprocess.run([\"echo\", \"Backuped nodine.log file \", str(datetime.now())])\n subprocess.run([\"truncate\", \"-s\", \"0\", nodine_log_path])\n last_trimmed = time.time()\n\nif __name__ == \"__main__\":\n while(True):\n try:\n trimmer()\n except Exception as e:\n print(e)\n pass\n time.sleep(3600)\n","repo_name":"vanja032/LogCleaner","sub_path":"LogTrimmer.py","file_name":"LogTrimmer.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41825001153","text":"from typing import Union\r\n\r\nfrom fastapi import FastAPI\r\n\r\nfrom selenium import webdriver\r\n\r\nimport time\r\n\r\napp = FastAPI()\r\n\r\n\r\n@app.get(\"/\")\r\ndef read_root():\r\n return {\"Hello\": \"World\",\r\n \"Amber\": \"!!!\"}\r\n\r\n\r\n@app.get(\"/items/{item_id}\")\r\ndef read_item(item_id: int, q: Union[str, None] = None):\r\n return {\"item_id\": item_id, \"q\": q}\r\n\r\n\r\n@app.get(\"/selenium/{item_id}\")\r\ndef selenium(item_id: int):\r\n try:\r\n browser = webdriver.Remote(\r\n command_executor='http://xxx.xxx.xx.xx:14444/wd/hub',\r\n options=webdriver.ChromeOptions()\r\n )\r\n browser.get('https://www.google.com')\r\n browser.save_screenshot(\"./app/chrome.png\")\r\n print(browser.title)\r\n\r\n\r\n except Exception as error:\r\n print(error)\r\n finally:\r\n time.sleep(30)\r\n browser.quit()\r\n\r\n\r\n return {\"item_id\": item_id}\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"draguitar/Fastapi-SeleniumGrid","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23421202191","text":"def detN(X, F, C):\r\n N = (round(((X*F-2*C)/(F*C)-0.5)))\r\n if N < 0:\r\n return 0\r\n else:\r\n return N\r\ndef detT(X, F, C):\r\n N = detN(X,F,C)\r\n T = X/(2+((N)*F))\r\n for i in range(0, int(N)):\r\n T = T + C/(2+i*F)\r\n return T\r\nT = int(raw_input())\r\nB = open('GCJ2k14A2.out','w')\r\nfor i in range(1,T+1):\r\n C, F, X = raw_input().split()\r\n C, F, X = float(C), float(F), float(X)\r\n B.write(\"Case #\" + str(i) + \": \" + str(detT(X,F,C)) + \"\\n\")\r\nB.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1203.py","file_name":"1203.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2453921878","text":"from tkinter.ttk import Treeview\nfrom tkinter import END\nfrom Frames_Enums.enums import Align_Text, LabelPosition\n\nclass TTreeview(Treeview):\n \"\"\"Treeview customizado.\n Para usar os eventos do botão, cria a def usando o prefixo que passou no parâmetro 'callback_prefix' + '_' + nome do evento.\n Eventos disponíveis:\n on_backspace_press: Quando a tecla backspace é pressionada.\n on_button_release: Quando o botão do mouse é solto.\n on_change: Quando o texto é alterado.\n on_click: Quando o botão é clicado.\n on_drag: Quando o mouse é arrastado.\n on_drop: Quando o mouse é arrastado e solto.\n on_double_click: Quando o botão é clicado duas vezes.\n on_enter: Quando o mouse entra no botão.\n on_enter_press: Quando a tecla enter/return é pressionada.\n on_escape_press: Quando a tecla escape é pressionada.\n on_exit: Quando o mouse sai do botão.\n on_focus: Quando o botão ganha foco.\n on_key_press: Quando uma tecla é pressionada.\n on_key_release: Quando uma tecla é solta.\n on_left_click: Quando o botão é clicado com o botão esquerdo do mouse.\n on_mousewheel_click: Quando o botão é clicado com o botão do meio do mouse.\n on_tab_press: Quando a tecla tab é pressionada.\n on_title_click: Quando o título é clicado, precisa de parametro para titulo da coluna\n on_unfocus: Quando o botão perde foco.\"\"\"\n\n def __init__(self, titulos: list, tamanhos: list, master: any = None, callback_prefix: str = '', **kwargs):\n if len(titulos) != len(tamanhos):\n raise ValueError('O número de titulos deve ser igual ao número de tamanhos.')\n self.callback_prefix = callback_prefix\n super().__init__(master,columns=titulos, show=\"headings\", **kwargs)\n self.__draged = False\n self.__criar_titulos(titulos, tamanhos)\n self.__bind_events()\n\n def __criar_titulos(self, titulos: list, tamanhos: list, align_titulo: Align_Text = Align_Text.CENTER, align_dados: Align_Text = Align_Text.CENTER):\n \"\"\"Private method.: Cria os titulos.\"\"\"\n for posicao, titulo in enumerate(titulos):\n self.heading(titulo, text=titulo, anchor=align_titulo.value['anchor'], command=lambda t=titulo: self.on_title_click(t))\n self.column(posicao, width=tamanhos[posicao], anchor=align_dados.value['anchor'])\n\n def __bind_events(self):\n \"\"\"Private method.: Cria os eventos do botão.\"\"\" \n events = {\n '': 'on_backspace_press', \n '': 'on_click',\n '': 'on_mousewheel_click',\n '': 'on_left_click',\n '': 'on_double_click',\n '': 'on_enter',\n '': 'on_escape_press',\n '': 'on_focus',\n '': 'on_leave_focus',\n '': 'on_key_release',\n '': 'on_exit',\n '': 'on_enter_press',\n '': 'on_tab_press',\n }\n self.bind('', self.on_drag)\n self.bind('', self.on_button_release)\n self.bind('', self.on_key_press)\n\n for event, event_type in events.items():\n self.bind(event, self.__create_event_handler(event_type))\n \n return self\n\n def set_focus(self):\n \"\"\"Coloca o foco no botão.\"\"\"\n self.focus_set()\n\n def get_selected_line(self):\n \"\"\"Retorna a linha selecionada.\"\"\"\n return self.selection()\n\n def clear(self):\n \"\"\"Limpa o texto.\"\"\"\n self.delete(1.0, END)\n\n def on_button_release(self, event):\n \"\"\"Evento disparado quando o botão do mouse é solto.\"\"\"\n self.__call_callback('on_button_release')\n if self.__draged:\n self.__call_callback('on_drop')\n self.__draged = False\n\n def on_drag(self, event):\n \"\"\"Evento disparado quando o mouse é arrastado.\"\"\"\n self.__call_callback('on_drag')\n self.__draged = True\n\n def on_key_press(self, event):\n \"\"\"Evento 
disparado quando uma tecla é pressionada.\"\"\"\n self.__call_callback('on_key_press')\n self.__call_callback('on_change')\n\n def on_title_click(self, titulo: str):\n \"\"\"Evento disparado quando o titulo é clicado.\"\"\"\n method_name = f'{self.callback_prefix}_on_title_click'\n callback = getattr(self.master, method_name, None)\n if callback and callable(callback):\n callback(titulo)\n\n def __create_event_handler(self, event_type):\n \"\"\"Private method.: Cria o handler do evento.\"\"\"\n def handler(event=None):\n self.__call_callback(event_type)\n return handler\n\n def __call_callback(self, event_type):\n \"\"\"Private method.: Chama o callback.\"\"\"\n method_name = f'{self.callback_prefix}_{event_type}'\n callback = getattr(self.master, method_name, None)\n if callback and callable(callback):\n callback()","repo_name":"ryan-henrique-nantes/framework","sub_path":"framework/Components/tabela.py","file_name":"tabela.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23392782341","text":"from math import sqrt, ceil, floor\nfrom sys import argv\nfrom lib.File import FileParser\n\ndef palindrome(argi):\n args = str(argi)\n return args == args[::-1]\n\ndef case_logic(case_args):\n a, b = map(int, case_args[0].split())\n base_a = int(ceil(sqrt(a)))\n base_b = int(floor(sqrt(b)))\n found = 0\n for i in range(base_a, base_b+1):\n if palindrome(i) and palindrome(i * i):\n found += 1\n return str(found)\n\ndef main(args):\n parser = FileParser(1, 1, args[0])\n problem = parser.parse_problem()\n problem.set_case_logic(case_logic)\n problem.solve()\n problem.done()\n\nif __name__ == \"__main__\":\n main(argv[1:])\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1790.py","file_name":"1790.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74179538434","text":"from typing import Callable\n\nfrom hypothesis import given\n\nfrom lz.logical import disjoin\nfrom tests import strategies\nfrom tests.hints import Domain\n\n\n@given(strategies.predicates, strategies.predicates_arguments)\ndef test_idempotency(predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n self_disjunction = disjoin(predicate)\n\n result = self_disjunction(predicate_argument)\n\n assert result is predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.true_predicates,\n strategies.predicates_arguments)\ndef test_absorbing_element(predicate: Callable[[Domain], bool],\n true_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(predicate, true_predicate)\n right_disjunction = disjoin(true_predicate, predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result is true_predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.false_predicates,\n strategies.predicates_arguments)\ndef test_neutral_element(predicate: Callable[[Domain], bool],\n false_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(predicate, false_predicate)\n right_disjunction = disjoin(false_predicate, predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result is predicate(predicate_argument)\n\n\n@given(strategies.predicates, strategies.predicates,\n strategies.predicates_arguments)\ndef test_commutativity(left_predicate: Callable[[Domain], bool],\n right_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n left_disjunction = disjoin(left_predicate, right_predicate)\n right_disjunction = disjoin(right_predicate, left_predicate)\n\n left_result = left_disjunction(predicate_argument)\n right_result = right_disjunction(predicate_argument)\n\n assert left_result is right_result\n\n\n@given(strategies.predicates, strategies.predicates, strategies.predicates,\n strategies.predicates_arguments)\ndef test_associativity(left_predicate: Callable[[Domain], bool],\n mid_predicate: Callable[[Domain], bool],\n right_predicate: Callable[[Domain], bool],\n predicate_argument: Domain) -> None:\n first_disjunction = disjoin(disjoin(left_predicate, mid_predicate),\n right_predicate)\n second_disjunction = disjoin(left_predicate, disjoin(mid_predicate,\n right_predicate))\n\n assert (first_disjunction(predicate_argument)\n is second_disjunction(predicate_argument))\n","repo_name":"lycantropos/lz","sub_path":"tests/logical_tests/test_disjoin.py","file_name":"test_disjoin.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"4242139661","text":"# Veri önişleme ve analizi için kullanılacak kütüphane eklenir\nimport pandas as pd\n\n# Kırmızı şarapların olduğu veri seti okunur\nred_wine_dataset = pd.read_csv('../input/qualityofredwine/winequality-red.csv')\n\n# Beyaz şarapların olduğu veri seti okunur\nwhite_wine_dataset = pd.read_csv('../input/qualityofwhitewine/winequality-white.csv')\n\n# Red şarapların olduğu veri setinin her bir sütununda toplam kaç tane boş kayıt olduğuna bakılır\nred_wine_dataset.isnull().sum()\n\n# Red şarapların olduğu veri setindeki tahmin edilecek değerin kaç farklı değer ve bu değerlerin toplam adetlerine bakılır\nred_wine_dataset.quality.value_counts()\n\n# Beyaz şarapların olduğu veri setinin her bir sütununda toplam kaç tane boş kayıt olduğuna bakılır\nwhite_wine_dataset.isnull().sum()\n\n# Beyaz şarapların olduğu veri setindeki tahmin edilecek değerin kaç farklı değer ve bu değerlerin toplam adetlerine bakılır\nwhite_wine_dataset.quality.value_counts()\n\n# Kırmızı ve beyaz şarapların olduğu veri setleri ayrı ayrı olarak parçalanır\n# X değişkenlerinde veri setlerin öznitelikleri ve y değişkenlerinde de tahmin edilecek değer yer alır\nred_wine_X = red_wine_dataset.iloc[:,:-1].values\nred_wine_y = red_wine_dataset.iloc[:,-1].values\n\nwhite_wine_X = white_wine_dataset.iloc[:,:-1].values\nwhite_wine_y = white_wine_dataset.iloc[:,-1].values\n\n# Kırmızı ve beyaz şarap veri setleri, eğitim ve test verileri olarak ayrılır\nfrom sklearn.model_selection import train_test_split\nred_wine_X_train, red_wine_X_test, red_wine_y_train, red_wine_y_test = train_test_split(red_wine_X, red_wine_y, test_size = 0.20, random_state = 4)\nwhite_wine_X_train, white_wine_X_test, white_wine_y_train, white_wine_y_test = train_test_split(white_wine_X, white_wine_y, test_size = 0.20, random_state = 4)\n\n# Tahminlerden sonra veri çatılarını birleştirmek için yedeklenir\ncache_red_wine_X_test = red_wine_X_test\ncache_red_wine_y_test = red_wine_y_test\ncache_white_wine_X_test = white_wine_X_test\ncache_white_wine_y_test = white_wine_y_test\n\n# Öznitelikler arasındaki korelasyon sayıları düşük olduğundan veri setlerin öznitelikleri ölçeklendirilir\nfrom sklearn.preprocessing import StandardScaler\nred_wine_sc = StandardScaler()\nred_wine_X_train = red_wine_sc.fit_transform(red_wine_X_train)\nred_wine_X_test = red_wine_sc.transform(red_wine_X_test)\nwhite_wine_sc = StandardScaler()\nwhite_wine_X_train = white_wine_sc.fit_transform(white_wine_X_train)\nwhite_wine_X_test = white_wine_sc.transform(white_wine_X_test)\n\n# y değişkenlerindeki nümerik değerler kategorik hale dönüştürülür\nred_wine_y_train = red_wine_y_train.astype('object')\nred_wine_y_test = red_wine_y_test.astype('object')\nwhite_wine_y_train = white_wine_y_train.astype('object')\nwhite_wine_y_test = white_wine_y_test.astype('object')\n\n# Kategorik veriler sayısallaştırılır\nfrom sklearn.preprocessing import LabelEncoder\nred_wine_labelencoder_y = LabelEncoder()\nred_wine_y_train = red_wine_labelencoder_y.fit_transform(red_wine_y_train)\nred_wine_y_test = red_wine_labelencoder_y.transform(red_wine_y_test)\nwhite_wine_labelencoder_y = LabelEncoder()\nwhite_wine_y_train = white_wine_labelencoder_y.fit_transform(white_wine_y_train)\nwhite_wine_y_test = white_wine_labelencoder_y.transform(white_wine_y_test)\n\n# Sayısal olan kategorik veriler ikili sisteme dönüştürülür\nfrom keras.utils import np_utils\nred_wine_y_train = np_utils.to_categorical(red_wine_y_train)\nred_wine_y_test = 
np_utils.to_categorical(red_wine_y_test)\nwhite_wine_y_train = np_utils.to_categorical(white_wine_y_train)\nwhite_wine_y_test = np_utils.to_categorical(white_wine_y_test)\n\n# Keras'ın model ve katman kütüphaneleri eklenir\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n\n# Kırmızı veri seti için Sequential nesnesinden değişken yaratılır ve değişkene 4 katman eklenir\nred_wine_classifier = Sequential()\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nred_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nred_wine_classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'softmax'))\n\n# Model, ilgili parametreler ile derlenir\nred_wine_classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Bu örnek model eğitim ve test verileriyle eğitilir\nred_wine_history = red_wine_classifier.fit(red_wine_X_train, red_wine_y_train, epochs = 200, batch_size = 128, verbose = 2, validation_data = (red_wine_X_test, red_wine_y_test))\n\n# Beyaz veri seti için Sequential nesnesinden değişken yaratılır ve değişkene 4 katman eklenir\nwhite_wine_classifier = Sequential()\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nwhite_wine_classifier.add(Dense(units = 44, kernel_initializer = 'uniform', activation = 'relu'))\nwhite_wine_classifier.add(Dense(units = 7, kernel_initializer = 'uniform', activation = 'softmax'))\n\n# Model, ilgili parametreler ile derlenir\nwhite_wine_classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Bu örnek model eğitim ve test verileriyle eğitilir\nwhite_wine_history = white_wine_classifier.fit(white_wine_X_train, white_wine_y_train, epochs = 200, batch_size = 128, verbose = 2, validation_data = (white_wine_X_test, white_wine_y_test))\n\n# Veri görselleştirmede kullanılan kütüphane eklenir\nfrom matplotlib import pyplot as plt\n\n# Kırmızı ve beyaz şarap için oluşturulan modellerin eğitim boyunca başarı oranını grafiksel olarak gösterir\nplt.figure(figsize=(15,3))\nplt.subplot(1, 2, 1)\n\nplt.plot(red_wine_history.history['acc'])\nplt.plot(red_wine_history.history['val_acc'])\nplt.title('Red Wine Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='lower right')\n\nplt.figure(figsize=(15,3))\nplt.subplot(1, 2, 1)\n\nplt.plot(white_wine_history.history['acc'])\nplt.plot(white_wine_history.history['val_acc'])\nplt.title('White Wine Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='lower right')\nplt.show()\n\n# Kırmızı şarap için eğitilen model tahminleme işlemine tabi tutulur\nred_wine_y_pred = red_wine_classifier.predict(red_wine_X_test)\n\n# Beyaz şarap için eğitilen model tahminleme işlemine tabi tutulur\nwhite_wine_y_pred = white_wine_classifier.predict(white_wine_X_test)\n\n# Kırmızı şarap tahminleri aşağıdaki döngüde 6'lık sisteme getirilir\nred_wine_max_i = red_wine_y_pred.max(axis=1)\nfor i in range(len(red_wine_y_pred)):\n for j in range(6):\n if red_wine_y_pred[i,j] == red_wine_max_i[i]:\n red_wine_y_pred[i,j] = 1\n else:\n red_wine_y_pred[i,j] = 0\n \n# Beyaz şarap tahminleri aşağıdaki döngüde 
7'lik sisteme getirilir\nwhite_wine_max_i = white_wine_y_pred.max(axis=1)\nfor i in range(len(white_wine_y_pred)):\n for j in range(7):\n if white_wine_y_pred[i,j] == white_wine_max_i[i]:\n white_wine_y_pred[i,j] = 1\n else:\n white_wine_y_pred[i,j] = 0\n \n# Kırmızı şarap tahminlerinin toplam doğruluk oranı hesaplanır\nred_wine_crt_values = (red_wine_y_pred == red_wine_y_test).sum()\nred_wine_wrong_values = (red_wine_y_pred != red_wine_y_test).sum()\nred_wine_total = red_wine_crt_values+red_wine_wrong_values\nred_wine_result = red_wine_crt_values/red_wine_total\nprint(red_wine_result)\n\n# Beyaz şarap tahminlerinin toplam doğruluk oranı hesaplanır\nwhite_wine_crt_values = (white_wine_y_pred == white_wine_y_test).sum()\nwhite_wine_wrong_values = (white_wine_y_pred != white_wine_y_test).sum()\nwhite_wine_total = white_wine_crt_values+white_wine_wrong_values\nwhite_wine_result = white_wine_crt_values/white_wine_total\nprint(white_wine_result)\n\n# Karmaşıklık matrislerini bulmak adına kullanılan Python kütüphanesi eklenir\nfrom sklearn.metrics import confusion_matrix\n# Matematiksel işlemlerde kullanılan Python kütüphanesi eklenir\nimport numpy as np\n\n# Kırmızı şarapların tahmin doğruluğunu görmek adına karmaşıklık matrisi oluşturulur\nred_wine_y_test = [np.where(r==1)[0][0] for r in red_wine_y_test]\nred_wine_y_pred = [np.where(r==1)[0][0] for r in red_wine_y_pred]\nred_wine_cm = confusion_matrix(red_wine_y_test,red_wine_y_pred)\nprint(red_wine_cm)\n\n# Beyaz şarapların tahmin doğruluğunu görmek adına karmaşıklık matrisi oluşturulur\nwhite_wine_y_test = [np.where(r==1)[0][0] for r in white_wine_y_test]\nwhite_wine_y_pred = [np.where(r==1)[0][0] for r in white_wine_y_pred]\nwhite_wine_cm = confusion_matrix(white_wine_y_test, white_wine_y_pred)\nprint(white_wine_cm)\n\n# Kırmızı ve beyaz şarap tahminleri eski tablolarla birleştirilir\nred_wine_X_test = pd.DataFrame(cache_red_wine_X_test)\nred_wine_y_test = pd.DataFrame(red_wine_y_test)\nred_wine_y_pred = pd.DataFrame(red_wine_y_pred)\n\nred_wine_X_test = pd.concat([red_wine_X_test, red_wine_y_test], axis=1)\nred_wine_X_test = pd.concat([red_wine_X_test, red_wine_y_pred], axis=1)\nred_wine_X_test.columns = [\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\",\"real quality\",\"predicted quality\"]\n\nwhite_wine_X_test = pd.DataFrame(cache_white_wine_X_test)\nwhite_wine_y_test = pd.DataFrame(white_wine_y_test)\nwhite_wine_y_pred = pd.DataFrame(white_wine_y_pred)\nwhite_wine_X_test = pd.concat([white_wine_X_test, white_wine_y_test], axis=1)\nwhite_wine_X_test = pd.concat([white_wine_X_test, white_wine_y_pred], axis=1)\nwhite_wine_X_test.columns = [\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\",\"real quality\",\"predicted quality\"]\n\nred_wine_X_test.to_csv('prediction-of-red-wine-quality.csv')\nwhite_wine_X_test.to_csv('prediction-of-white-wine-quality.csv')\n","repo_name":"burakcantimucin/WineQualityPrediction","sub_path":"wine-quality-prediction-with-keras.py","file_name":"wine-quality-prediction-with-keras.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"956667746","text":"class Solution:\n def isMonotonic(self, A: List[int]) -> bool:\n pos=False\n neg=False\n for i in range(len(A)-1):\n dif=A[i+1]-A[i]\n if dif>0:\n pos=True\n elif dif<0:\n neg=True\n return not (pos and neg)\n","repo_name":"jingzhij/Leetcode","sub_path":"896 单调数列.py","file_name":"896 单调数列.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71754970113","text":"##########################################################################\n################################ SLITHER #################################\n##########################################################################\n\n'''\nWHAT IS THIS?\n Digital snakes that each have their own genetically evolved artificial intelligence. The snakes live in a\n virtual 2D world with confined resources/energy. Consuming energy increases the length of the snake at a\n one-to-one ratio through an extension of the length of snake (from its tail) in the next iteration of the world.\n The snakes must move head first in each iteration of the world. They can curl up/overlap on themselves,\n but if they move into a space occupied by another snake, they will die, and their body will be converted into\n energy at a one-to-one ratio. The rules of the world are inspired by slither.io.\n'''\n\nimport operator\nprint(dir(operator))\nimport numpy\nimport random\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport platform\nif platform.system() == 'Darwin':\n matplotlib.use('MacOSX')\nelse:\n matplotlib.use('TkAgg')\n\nimport copy\nimport traceback\nfrom time import sleep\n\nclass World():\n def __init__(self, size=100, initial_being_spawn_count=100, initial_energy_spawn_count=150):\n self.object_world = numpy.zeros(shape=(size,size), dtype=object)\n self.id_world = numpy.zeros(shape=(size,size))\n self.size = size\n self.Emperor_DNA = False # Longest Being ever to live in this World\n self.Emperor_length = 0\n self.King_DNA = False # Longest Being that is alive\n self.King_length = 0\n self.Queen_DNA = False # Second longest Being that is alive\n self.Queen_length = 0\n self.all_Beings = self.spawn_beings(initial_being_spawn_count)\n self.uneaten_Energy = self.spawn_energy(initial_energy_spawn_count)\n self.World_age = 0\n self.all_potential_locations = []\n\n class Energy(): # Not necessary to have Energy object at this point but adding so have it for later\n def __init__(self, location, energy_count=1):\n self.location = location # Nested lists with X, Y coordinates; len(Energy.location) always = 1\n self.energy_count = energy_count\n self.energy_id = 2\n # self.energy_id = self.energy_count + 1 # Can use thiis if want energy_count to be visible in id_world\n\n class Being():\n\n def __init__(self, _World, location, parent_DNA=False):\n self.location = location # Nested lists with X, Y coordinates\n self.head = location[0] # List with X, Y coordinates\n self.energy = 0\n self.age = 0\n random.seed()\n self.head_id = random.uniform(1.0, 1.25)\n self.body_id = random.uniform(1.26, 1.50)\n # There is the technical potential for colisions (eg,\n # two identical head_id)\n\n if parent_DNA != False:\n random.seed()\n random_index = random.choice(range(len(str(parent_DNA))))\n random_digit = random.choice(list('0123456799'))\n baby_DNA = int((str(parent_DNA)[:random_index] + random_digit + str(parent_DNA)[random_index + 1:]))\n self.DNA = baby_DNA\n # elif (_World.King_DNA != False) and (_World.Queen_DNA != False):\n # try:\n # baby_DNA = \"\"\n # for index in range(len(str(_World.King_DNA))):\n # random.seed()\n # chosen_DNA = random.choice([_World.King_DNA, _World.Queen_DNA])\n # baby_DNA = baby_DNA + str(chosen_DNA)[index]\n # self.DNA = int(baby_DNA)\n # except Exception as e:\n # print(\"Uh oh... 
Ran into error while compiling baby_DNA: \" + str(e))\n # traceback.print_exc()\n # print(\"Generating random DNA instead...\")\n # random.seed(self.head_id)\n # random_DNA = [random.choice(list('0123456799')) for i in range(200)]\n # random_DNA = ''.join(random_DNA)\n # random_DNA = int(random_DNA)\n # self.DNA = random_DNA\n # elif (_World.King_DNA != False) and (_World.Queen_DNA == False):\n # random.seed()\n # random_index = random.choice(range(len(str(_World.King_DNA))))\n # random_digit = random.choice(list('0123456799'))\n # baby_DNA = int((str(_World.King_DNA)[:random_index] + random_digit + \\\n # str(_World.King_DNA)[random_index + 1:]))\n # self.DNA = baby_DNA\n else:\n random.seed(self.head_id)\n random_DNA = [random.choice(list('0123456799')) for i in range(250)]\n random_DNA = ''.join(random_DNA)\n random_DNA = int(random_DNA)\n self.DNA = random_DNA\n\n def update_Being(self, _World):\n self.age = self.age + 1\n self.head = _World.choose_move(self)\n # self.head = random.choice(potential_locations) # (UPDATE TO ALLOW FOR MORE SOPHISTICATED CHOICE/MUTATIONS LATER)\n if self.head == None: # This is temporary fix for rare error where Being has no neck for some reason.\n pass\n elif self.energy > 0:\n new_location = [self.head]\n for coordinates in self.location:\n new_location.append(coordinates)\n self.location = new_location\n self.energy = self.energy - 1\n elif self.energy == 0:\n new_location = [self.head]\n for coordinates in self.location:\n new_location.append(coordinates)\n del new_location[-1]\n self.location = new_location\n return_package = [self]\n if ((self.age % 100) == 0) and (len(self.location) > 5):\n baby_body = self.location.pop()\n baby_head = self.location.pop()\n baby_location = [baby_head, baby_body]\n print(\"baby_location = \" + str(baby_location))\n baby_Being = _World.Being(_World, baby_location, parent_DNA=self.DNA)\n _World.all_Beings.append(baby_Being)\n counter = 1\n for coordinates in baby_location: # Think this is unecessary b/c worlds will be recompiled from\n # all_Beings\n if counter == 1:\n _World.object_world[coordinates[0], coordinates[1]] = baby_Being\n _World.id_world[coordinates[0], coordinates[1]] = baby_Being.head_id\n counter = counter + 1\n else:\n _World.object_world[coordinates[0], coordinates[1]] = baby_Being.body_id\n _World.id_world[coordinates[0], coordinates[1]] = baby_Being.body_id\n return_package.append(baby_Being)\n\n return return_package\n\n def update_World(self):\n # Every 100 years, spawn Energy if total_energy is below 298 (World somehow losing energy--this is just a patch\n # until figure out what's going on)\n total_energy = sum([Energy.energy_count for Energy in world.uneaten_Energy] + \\\n [len(Being.location) for Being in world.all_Beings] + \\\n [Being.energy for Being in world.all_Beings])\n # print(\"Total Energy: \" + str(total_energy))\n # print(\"\\tUneaten Energy: \" + str(sum([Energy.energy_count for Energy in world.uneaten_Energy])))\n # print(\"\\tSum of Being's Locations: \" + str(sum([len(Being.location) for Being in world.all_Beings])))\n # print(\"\\tSum of Being's Unused Energy: \" + str(sum([Being.energy for Being in world.all_Beings])))\n if total_energy < 350:\n self.uneaten_Energy = self.uneaten_Energy + self.spawn_energy(350 - total_energy)\n\n # After specified interval, spawn new Beings if population running low and sufficient uneaten_Energy\n if ((self.World_age % 51) == 0):\n if len(self.all_Beings) < 50:\n total_energy = sum([Energy.energy_count for Energy in world.uneaten_Energy] + 
\\\n [len(Being.location) for Being in world.all_Beings] + \\\n [Being.energy for Being in world.all_Beings])\n if len(self.uneaten_Energy) > 50 - len(self.all_Beings) * 3:\n spawn_count = 50 - len(self.all_Beings)\n else:\n spawn_count = int((len(self.uneaten_Energy) - 1) / 2)\n spawned_Beings = self.spawn_beings(spawn_count)\n self.all_Beings = self.all_Beings + spawned_Beings\n for i in range(spawn_count * 2):\n random.seed()\n try:\n del self.uneaten_Energy[\n random.choice(range(len(self.uneaten_Energy)))] # Remove Energy to keep equilibrium\n except:\n pass\n\n # Compile updated beings\n updated_beings = []\n copied_all_Beings = copy.deepcopy(self.all_Beings)\n\n for being in copied_all_Beings:\n # Not sure if have to do below or could just do updated_beings.append(being.update_Being())\n return_package = being.update_Being(self)\n for updated_being in return_package: # May include baby\n updated_beings.append(updated_being)\n\n # Create blank updated worlds\n updated_object_world = numpy.zeros(shape=(self.size,self.size), dtype=object)\n updated_id_world = numpy.zeros(shape=(self.size,self.size))\n\n # Creating lists b/c think faster to iterate through them rather than full world\n updated_being_heads = [updated_being.head for updated_being in updated_beings]\n updated_being_bodies = [updated_being.location[1:] for updated_being in updated_beings]\n uneaten_Energy_locations = [[Energy.location[0][0], Energy.location[0][1]] for Energy in self.uneaten_Energy]\n\n # Incorporate Energy to updated worlds and uneaten_energy_locations\n for Energy in self.uneaten_Energy:\n updated_object_world[Energy.location[0][0], Energy.location[0][1]] = Energy\n updated_id_world[Energy.location[0][0], Energy.location[0][1]] = Energy.energy_id\n\n # Incorporate updated_beings to updated worlds\n for updated_being in updated_beings:\n # Shrink one block every 300 years\n if ((updated_being.age % 300) == 0):\n coordinates = updated_being.location.pop(-1) # Delete last block (tail)\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count=1)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n self.uneaten_Energy.append(realeased_Energy)\n\n # If beings' heads collided, both beings die in their updated locations (heads overlapping), with any unused\n # energy of either beings releasing where their heads overlapped\n if updated_being_heads.count(updated_being.head) > 1:\n for coordinates in updated_being.location:\n if coordinates == updated_being.head:\n if isinstance(updated_object_world[coordinates[0], coordinates[1]], self.Energy):\n updated_Energy = updated_object_world[coordinates[0], coordinates[1]]\n updated_Energy.energy_count = updated_Energy.energy_count + 1 + updated_being.energy\n updated_object_world[coordinates[0], coordinates[1]] = updated_Energy\n else:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n energy_count = 1 + updated_being.energy)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n else:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n # If being collides with other being's body, being dies without updating location, with any unused\n # 
releasing at its head\n elif any(updated_being.head in location for location in updated_being_bodies) and \\\n (updated_being.location.count(updated_being.head) == 1):\n for coordinates in updated_being.location:\n if coordinates == updated_being.head:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n energy_count=1 + updated_being.energy)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n else:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n # If being collides with energy, the being consumes the energy and saves it for growing in subsequent\n # round(s)\n elif updated_being.head in uneaten_Energy_locations:\n updated_being.energy = updated_being.energy + \\\n updated_object_world[updated_being.head[0], updated_being.head[1]].energy_count\n for coordinates in updated_being.location:\n if coordinates == updated_being.head:\n updated_object_world[coordinates[0], coordinates[1]] = updated_being\n updated_id_world[coordinates[0], coordinates[1]] = updated_being.head_id\n else:\n updated_object_world[coordinates[0], coordinates[1]] = updated_being.body_id\n updated_id_world[coordinates[0], coordinates[1]] = updated_being.body_id\n uneaten_Energy_locations.remove(updated_being.head)\n # If being too small for age, kill being\n elif ((updated_being.age % 50) == 0) and len(updated_being.location) < (2 + (updated_being.age/50)):\n for coordinates in updated_being.location:\n if coordinates == updated_being.head:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]],\n energy_count=1 + updated_being.energy)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n else:\n realeased_Energy = self.Energy([[coordinates[0], coordinates[1]]], energy_count = 1)\n updated_object_world[coordinates[0], coordinates[1]] = realeased_Energy\n updated_id_world[coordinates[0], coordinates[1]] = realeased_Energy.energy_id\n pass\n # If being collides with nothing, it's location is updated unless it is told, in which case it dies\n else:\n for coordinates in updated_being.location:\n if coordinates == updated_being.head:\n updated_object_world[coordinates[0], coordinates[1]] = updated_being\n updated_id_world[coordinates[0], coordinates[1]] = updated_being.head_id\n else:\n updated_object_world[coordinates[0], coordinates[1]] = updated_being.body_id\n updated_id_world[coordinates[0], coordinates[1]] = updated_being.body_id\n\n # Update world\n self.object_world = updated_object_world\n self.id_world = updated_id_world\n updated_all_Beings = []\n updated_uneaten_Energy = []\n for coordinates, obj in numpy.ndenumerate(self.object_world): # Iterate through current object_world\n if isinstance(obj, self.Being):\n updated_all_Beings.append(obj)\n if isinstance(obj, self.Energy):\n updated_uneaten_Energy.append(obj)\n self.all_Beings = updated_all_Beings\n self.uneaten_Energy = updated_uneaten_Energy\n for Being in self.all_Beings:\n if len(Being.location) > self.Emperor_length:\n self.Emperor_DNA = Being.DNA\n self.Emperor_length = len(Being.location)\n if len(Being.location) > self.King_length:\n self.Queen_DNA = self.King_DNA\n self.Queen_length = self.King_length\n self.King_DNA = Being.DNA\n 
self.King_length = len(Being.location)\n elif len(Being.location) > self.Queen_length:\n self.Queen_DNA = Being.DNA\n self.Queen_length = len(Being.location)\n self.World_age = self.World_age + 1\n\n\n def choose_math_operation(self, DNA_strand):\n ops = [\n # Returning number\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.floordiv,\n # operator.pow, # This resulted in TypeError: can't convert complex to float\n operator.mod,\n # Returning boolean (can convert to number)\n # operator.lt,\n # operator.le,\n # operator.eq,\n # operator.gt,\n # operator.ge,\n # operator.ne,\n ]\n random.seed(DNA_strand)\n operation = random.choice(ops)\n return operation\n\n def processing_single_cell(_World, Being, cell, potential_locations, DNA):\n processing_results = []\n try:\n cell_id = _World.id_world[cell[0], cell[1]]\n except: # Cell isn't part of World grid\n cell_id = 0\n processing_results.append(cell_id)\n is_option = int(bool(str(cell_id) in str(potential_locations)))\n processing_results.append(is_option)\n diff_x = cell[0] - Being.head[0]\n processing_results.append(diff_x)\n diff_y = cell[1] - Being.head[0]\n processing_results.append(diff_y)\n '''\n for i in range(3):\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # for i in range (number_of_operations): # Implement this later\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n n = random.uniform(-1,1)\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n operation = _World.choose_math_operation(DNA)\n calc_result = operation(cell_id, n)\n processing_results.append(calc_result)\n '''\n return [processing_results, DNA]\n\n\n def compile_potential_locations(self):\n all_potential_locations = []\n for Being in self.all_Beings:\n try: # Not sure why this runs into following error sometimes: neckX = Being.location[1][0] --> IndexError: list index out of range\n headX = Being.head[0]\n headY = Being.head[1]\n neckX = Being.location[1][0]\n neckY = Being.location[1][1]\n if headX == neckX: # Traveling vertically\n if (headY - neckY) == 1: # Traveling north\n forward = [headX, headY + 1]\n left = [headX - 1, headY]\n right = [headX + 1, headY]\n elif (headY - neckY) == -1: # Traveling south\n forward = [headX, headY - 1]\n left = [headX + 1, headY]\n right = [headX - 1, headY]\n elif headY == neckY: # Traveling horizontally\n if (headX - neckX) == 1: # Traveling east\n forward = [headX + 1, headY]\n left = [headX, headY + 1]\n right = [headX, headY - 1]\n elif (headX - neckX) == -1: # Traveling West\n forward = [headX - 1, headY]\n left = [headX, headY - 1]\n right = [headX, headY + 1]\n all_potential_locations.append(forward)\n all_potential_locations.append(left)\n all_potential_locations.append(right)\n except:\n pass\n self.all_potential_locations = all_potential_locations\n return all_potential_locations\n\n def choose_move(_World, Being):\n\n try:\n\n def vision(_World, Being):\n\n def near_vision(_World, Being):\n x = Being.head[0]\n y = Being.head[1]\n near_vision = [\n [x-2, y+2], [x-1, y+2], [x, y+2], [x+1, y+2], [x+2, y+2],\n [x-2, y+1], [x-1, y+1], [x, y+1], [x+1, y+1], [x+2, y+1],\n [x-2, y], [x-1, y], [x, y], [x+1, y], [x+2, y],\n [x-2, y-1], [x-1, y-1], [x, y-1], [x+1, y-1], [x+2, y-1],\n [x-2, y-2], [x-1, y-2], [x, y-2], [x+1, y-2], [x+2, y-2],\n ]\n return near_vision\n\n def straight_vision(_World, Being):\n\n def north_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x, y+1]\n if 
forward_block == 0.0: # Empty block\n distance = distance + 1\n y = y+1\n return north_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def south_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x, y-1]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n y = y-1\n return south_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def east_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x + 1, y]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n x = x + 1\n return east_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n def west_distance_to_object(_World, x, y, distance=1):\n try:\n forward_block = _World.id_world[x-1, y]\n if forward_block == 0.0: # Empty block\n distance = distance + 1\n x = x-1\n return west_distance_to_object(_World, x, y, distance=distance)\n else:\n if forward_block == 2:\n energy = True\n else:\n energy = False\n return [distance, energy]\n except: # forward_block is off the World grid\n energy = False\n return [distance, energy]\n\n headX = Being.head[0]\n headY = Being.head[1]\n neckX = Being.location[1][0]\n neckY = Being.location[1][1]\n return_package = north_distance_to_object(_World, headX, headY)\n north_distance = return_package[0]\n north_energy = return_package[1]\n return_package = south_distance_to_object(_World, headX, headY)\n south_distance = return_package[0]\n south_energy = return_package[1]\n return_package = east_distance_to_object(_World, headX, headY)\n east_distance = return_package[0]\n east_energy = return_package[1]\n return_package = west_distance_to_object(_World, headX, headY)\n west_distance = return_package[0]\n west_energy = return_package[1]\n\n if headX == neckX: # Traveling vertically\n if (headY - neckY) == 1: # Traveling north\n forward = [headX, headY + 1]\n forward_distance = north_distance\n forward_energy = north_energy\n left = [headX - 1, headY]\n left_distance = west_distance\n left_energy = west_energy\n right = [headX + 1, headY]\n right_distance = east_distance\n right_energy = east_energy\n elif (headY - neckY) == -1: # Traveling south\n forward = [headX, headY - 1]\n forward_distance = south_distance\n forward_energy = south_energy\n left = [headX + 1, headY]\n left_distance = east_distance\n left_energy = east_energy\n right = [headX - 1, headY]\n right_distance = west_distance\n right_energy = west_energy\n elif headY == neckY: # Traveling horizontally\n if (headX - neckX) == 1: # Traveling east\n forward = [headX + 1, headY]\n forward_distance = east_distance\n forward_energy = east_energy\n left = [headX, headY + 1]\n left_distance = north_distance\n left_energy = north_energy\n right = [headX, headY - 1]\n right_distance = south_distance\n right_energy = south_energy\n elif (headX - neckX) == -1: # Traveling West\n forward = [headX - 1, headY]\n forward_distance = west_distance\n forward_energy = west_energy\n left 
= [headX, headY -1]\n left_distance = south_distance\n left_energy = south_energy\n right = [headX, headY + 1]\n right_distance = north_distance\n right_energy = north_energy\n\n if (forward_distance == 2) and (forward_energy == False):\n forward_danger = True\n else:\n forward_danger = False\n if (left_distance == 2) and (left_energy == False):\n left_danger = True\n else:\n left_danger = False\n if (right_distance == 2) and (right_energy == False):\n right_danger = True\n else:\n right_danger = False\n\n die_locations = []\n try:\n if (int(_World.id_world[forward[0], forward[1]]) == 1) and (forward not in Being.location):\n forward_die = 1\n die_locations.append(forward)\n else:\n forward_die = 0\n except: # Off grid\n forward_die = 1\n try:\n if (int(_World.id_world[left[0], left[1]]) == 1) and (left not in Being.location):\n left_die = 1\n die_locations.append(left)\n else:\n left_die = 0\n except: # Off grid\n left_die = 1\n try:\n if (int(_World.id_world[right[0], right[1]]) == 1) and (right not in Being.location):\n right_die = 1\n die_locations.append(right)\n else:\n right_die = 0\n except: # Off grid\n right_die = 1\n\n potential_locations = [forward, left, right]\n distances = [1 - forward_distance/100, 1 - left_distance/100, 1 - right_distance/100]\n whether_energy = [int(forward_energy), int(left_energy), int(right_energy)]\n danger = [int(forward_danger), int(left_danger), int(right_danger)]\n die = [forward_die, left_die, right_die]\n output_package = [potential_locations, distances, whether_energy, danger, die, die_locations]\n return output_package\n\n output_package = straight_vision(_World, Being)\n # near_vision = near_vision(_World, Being)\n\n return output_package\n\n def smell(_World, Being, potential_locations):\n head_location = Being.head\n headX = head_location[0]\n headY = head_location[1]\n uneaten_Energy = _World.uneaten_Energy\n closest_Energy_location = False\n for Energy in uneaten_Energy:\n EnergyX = Energy.location[0][0]\n EnergyY = Energy.location[0][1]\n # print(\"Energy Location: \" + str(EnergyX) + \", \" + str(EnergyY))\n distance = abs(headX-EnergyX) + abs(headY-EnergyY)\n if closest_Energy_location == False:\n closest_Energy_location = Energy.location[0]\n closest_distance = distance\n elif closest_distance > distance:\n closest_Energy_location = Energy.location[0]\n closest_distance = distance\n try:\n CEL_X = closest_Energy_location[0]\n CEL_Y = closest_Energy_location[1]\n closest_potential_location = False\n for option in potential_locations:\n optionX = option[0]\n optionY = option[1]\n distance = abs(CEL_X-optionX) + abs(CEL_Y-optionY)\n if closest_potential_location == False:\n closest_potential_location = option\n closest_distance = distance\n elif closest_distance > distance:\n closest_potential_location = option\n closest_distance = distance\n except:\n closest_potential_location = None\n closest_distance = 0\n return_package = [closest_potential_location, closest_distance]\n return return_package\n\n\n output_package = vision(_World, Being)\n potential_locations = output_package[0]\n distances = output_package[1]\n whether_energy = output_package[2]\n danger = output_package[3]\n die = output_package[4]\n die_locations = output_package[5]\n data_list = distances + whether_energy + danger + die\n return_package = smell(_World, Being, potential_locations)\n potential_location_closest_to_Energy = return_package[0]\n distance_to_closest_Energy = return_package[1]\n data_list.append(distance_to_closest_Energy)\n all_potential_locations = 
_World.compile_potential_locations()\n pre_processing_results = []\n\n ### OLD WAY:\n # for cell in near_vision:\n # output = _World.processing_single_cell(Being, cell, potential_locations, DNA)\n # results = output[0]\n # DNA = output[1]\n # for result in results:\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # processing_results.append(result * random.uniform(-1,1))\n\n ### NEW WAY (STILL NOT SOPHISTICATED):\n for data in data_list:\n ### Pre-Process Option 1\n pre_processing_results.append(data)\n ### Pre-Process Option 2\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # input = sight_result\n # for i in range (number_of_operations): # Implement this later\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1,1)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # input = operation(input, n)\n # pre_processing_results.append(input)\n pass\n\n ### Pre-Process Option 3\n # for i in range(5):\n # intake_neuron = []\n # for sight_result in sight_results:\n # input = sight_result\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1, 1)\n # intake_neuron.append(n * input)\n # intake_result = sum(intake_neuron)\n # pre_processing_results.append(intake_result)\n\n\n processing_results = []\n for pre_processing_result in pre_processing_results:\n ### Option 1\n processing_results.append(pre_processing_result)\n\n ### Option 2\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # input = pre_processing_result\n # for i in range (number_of_operations):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-1,1)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # input = operation(input, n)\n # processing_results.append(input)\n # for i in range(3):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # input1 = random.choice(pre_processing_results)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # for i in range (number_of_operations):\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # input2 = random.choice(pre_processing_results)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # try:\n # input1 = operation(input1, input2)\n # except:\n # input1 = 0\n # processing_results.append(input1)\n\n first_loop = True\n for potential_location in potential_locations:\n DNA = Being.DNA\n assessment_value = 0\n for result in processing_results:\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n assessment_value = assessment_value + (result * random.uniform(-1,1))\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n potential_location_conflict_number = (all_potential_locations.count(potential_location) - 1)\n # assessment_value = assessment_value + (potential_location_conflict_number * random.uniform(-100, 100))\n assessment_value = assessment_value + (potential_location_conflict_number * 100) # FOR DEBUGGING PURPOSES\n if potential_location == potential_location_closest_to_Energy:\n ### Option 1\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # variable = 
distance_to_closest_Energy\n # for i in range(number_of_operations): # Implement this later\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-10, 10)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # variable = operation(variable, n)\n # random.seed(int(str(DNA)[0:8]))\n # DNA = int(str(DNA)[2:])\n # assessment_value = assessment_value + (variable * random.uniform(-1,1))\n ### Option 2\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n n = random.uniform(-10, 10)\n assessment_value = assessment_value + (n * random.uniform(-1,1))\n if potential_location in die_locations:\n ### Option 1\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # number_of_operations = random.choice([1, 2])\n # variable = 1\n # for i in range(number_of_operations): # Implement this later\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # n = random.uniform(-10, 10)\n # DNA = int(str(DNA)[2:])\n # random.seed(int(str(DNA)[0:8]))\n # operation = _World.choose_math_operation(DNA)\n # variable = operation(variable, n)\n # assessment_value = assessment_value + (variable * random.uniform(-10,10))\n ### Option 2\n DNA = int(str(DNA)[2:])\n random.seed(int(str(DNA)[0:8]))\n # assessment_value = assessment_value - (assessment_value * random.uniform(-100,100))\n assessment_value = assessment_value + abs(assessment_value * 100) # FOR DEBUGGING PURPOSES\n if first_loop == True:\n if (potential_location[0] <= 99) and (potential_location[0] >= 0) and (potential_location[1] <= 99) and \\\n (potential_location[1] >= 0): # Off the grid\n chosen_move = potential_location\n first_loop = False\n best = assessment_value\n else:\n if float(assessment_value) < float(best):\n if (potential_location[0] <= 99) and (potential_location[0] >= 0) and (potential_location[1] <= 99) and \\\n (potential_location[1] >= 0): # Off the grid\n chosen_move = potential_location\n return chosen_move\n\n except Exception as e:\n print(\"Uh oh... 
Ran into error while choosing move: \" + str(e))\n traceback.print_exc()\n return None\n\n def compile_random_spawn_locations(self, spawn_count, spawning_beings=True):\n\n def add_random_location(spawn_locations, spawning_beings=True, taken_locations=[]):\n if spawning_beings == True:\n random.seed()\n random_head_location = [random.choice(range(99)), random.choice(range(99))]\n if random_head_location not in taken_locations:\n x = random_head_location[0]\n y = random_head_location[1]\n potential_neck_locations = [[x, y + 1], [x - 1, y], [x + 1, y], [x, y - 1]]\n random_neck_location = spawn_random_neck_location(random_head_location, potential_neck_locations,\n spawn_locations, taken_locations=taken_locations)\n if random_neck_location != False:\n random_location = [random_head_location, random_neck_location]\n spawn_locations.append(random_location)\n taken_locations.append(random_head_location)\n taken_locations.append(random_neck_location)\n return [spawn_locations, taken_locations]\n else:\n return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n taken_locations=taken_locations)\n else:\n return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n taken_locations=taken_locations)\n elif spawning_beings == False:\n random_location = [[random.choice(range(99)), random.choice(range(99))]]\n if random_location not in taken_locations:\n spawn_locations.append(random_location)\n taken_locations.append(random_location)\n return [spawn_locations, taken_locations]\n else:\n return add_random_location(spawn_locations, spawning_beings=spawning_beings,\n taken_locations=taken_locations)\n\n def spawn_random_neck_location(head_location, potential_neck_locations, spawn_locations, taken_locations=[]):\n random_neck_location = random.choice(potential_neck_locations)\n if random_neck_location not in taken_locations:\n return random_neck_location\n else:\n potential_neck_locations.remove(random_neck_location)\n if not potential_neck_locations:\n return False\n elif potential_neck_locations:\n return spawn_random_neck_location(head_location, potential_neck_locations, spawn_locations,\n taken_locations=taken_locations)\n\n spawn_locations = []\n taken_locations = []\n for coordinates, id in numpy.ndenumerate(self.id_world):\n if id != 0:\n taken_locations.append(coordinates)\n for i in range(spawn_count):\n output = add_random_location(spawn_locations, spawning_beings=spawning_beings,\n taken_locations=taken_locations)\n spawn_locations = output[0]\n taken_locations = output[1]\n return spawn_locations\n\n def spawn_beings(self, spawn_count):\n locations = self.compile_random_spawn_locations(spawn_count, spawning_beings=True)\n Beings = []\n for location in locations:\n spawned_Being = self.Being(self, location)\n counter = 1\n for coordinates in location:\n if counter == 1:\n self.object_world[coordinates[0], coordinates[1]] = spawned_Being\n self.id_world[coordinates[0], coordinates[1]] = spawned_Being.head_id\n counter = counter + 1\n else:\n self.object_world[coordinates[0], coordinates[1]] = spawned_Being.body_id\n self.id_world[coordinates[0], coordinates[1]] = spawned_Being.body_id\n Beings.append(spawned_Being)\n return(Beings)\n\n def spawn_energy(self, spawn_count, energy_count=1):\n locations = self.compile_random_spawn_locations(spawn_count, spawning_beings=False)\n uneaten_Energy = []\n for location in locations:\n spawned_Energy = self.Energy(location, energy_count=energy_count)\n for coordinates in location:\n self.object_world[coordinates[0], 
coordinates[1]] = spawned_Energy\n self.id_world[coordinates[0], coordinates[1]] = spawned_Energy.energy_id\n uneaten_Energy.append(spawned_Energy)\n return(uneaten_Energy)\n\n # class universal_laws():\n # def __init__(self, location):\n\n # class chromosomes():\n # def __init__(self, location):\n\n\n\ndef operate(a, b, operation):\n try:\n operation(a, b)\n except:\n try:\n operation(a)\n except:\n False\n\nif __name__ == \"__main__\":\n # def main_run():\n world = World()\n plt.imshow(world.id_world)\n plt.clim(0, 30) # colorbar will be based on min value of 0 and max value of 30\n plt.colorbar()\n plt.pause(0.01)\n while True:\n # for i in range(3):\n # if len(world.all_Beings) < 25:\n # print(\"Spawning 25 new Beings\")\n # world.all_Beings = world.all_Beings + world.spawn_beings(25)\n world.update_World()\n plt.clf()\n total_energy = sum([Energy.energy_count for Energy in world.uneaten_Energy] + \\\n [len(Being.location) for Being in world.all_Beings] + \\\n [Being.energy for Being in world.all_Beings])\n plt.title(\"World Age: \" + str(world.World_age) + \"\\nTotal Energy: \" + str(total_energy) + \\\n \"\\nEmperor Length: \" + str(world.Emperor_length) + \"\\n King Length: \" + str(world.King_length))\n plt.imshow(world.id_world)\n plt.colorbar()\n plt.pause(0.01)\n print(\"Loop \" + str(world.World_age) + \" complete. Emperor Length = \" + str(world.Emperor_length) + \\\n \" Emperor_DNA = \" + str(world.Emperor_DNA) + \" + King_DNA = \" + str(world.King_DNA) + \\\n \" Queen_DNA = \" + str(world.Queen_DNA))\n\n\n # import cProfile\n # pr = cProfile.Profile()\n # pr.enable()\n # main_run()\n # pr.disable()\n # pr.print_stats(sort='time')\n\n\n","repo_name":"brokeharvard/FreeWill","sub_path":"slither.py","file_name":"slither.py","file_ext":"py","file_size_in_byte":49129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
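The choose_move logic above repeatedly consumes the genome: it seeds Python's RNG with the leading eight digits of the DNA, draws a weight, then shifts the DNA two digits. A minimal self-contained sketch of that idiom (the DNA value here is a toy, not one produced by the World class):

import random

def consume_dna(dna, draws):
    # Each draw reads the first 8 digits as a seed, then drops 2 digits,
    # so the same genome always yields the same weight sequence.
    weights = []
    for _ in range(draws):
        dna = int(str(dna)[2:])
        random.seed(int(str(dna)[0:8]))
        weights.append(random.uniform(-1, 1))
    return weights

print(consume_dna(12345678901234567890, 3))

Because the weights are a pure function of the genome, a mutated digit changes behaviour deterministically, which is what makes the evolution loop reproducible.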
+{"seq_id":"72547979713","text":"import sys\nimport os\nimport bpy\nimport json\nimport argparse\n\n# Get the path of the PLUME directory\nif os.path.dirname(__file__) not in sys.path:\n sys.path.append(os.path.dirname(__file__))\n\n# # Fetch the blender directory\nblend_dir = os.path.dirname(bpy.data.filepath)\nif blend_dir not in sys.path:\n sys.path.append(blend_dir)\n\nfrom config import Config, Color\n\nclass MeshGeneration:\n def __init__(self, generation_name_p, index_p) -> None:\n self.generation_name = str(generation_name_p)\n self.index = str(index_p)\n self.path = Config.PLUME_DIR.value+\"/data/raw_data/\"+self.generation_name+\"/\"+self.index+\"/data.json\"\n self.saved_mesh_path = Config.PLUME_DIR.value+\"/data/mesh_files/\"+self.generation_name+\"/\"+self.index+\"/mesh.obj\"\n self.json_file = open(self.path)\n self.data = json.load(self.json_file)\n self.obj = None\n self.mesh = None\n self.generate_mesh()\n exit()\n\n\n def generate_mesh(self):\n \"\"\"\n Main function to create the mesh\n \"\"\"\n self.initial_cleanup()\n verts, edges = self.extract_mesh_data()\n result_loading = self.load_mesh_in_blender(verts_p=verts, edges_p=edges)\n if result_loading == -1:\n print(f\"{Color.FAIL.value}There was a problem while creating the mesh{Color.ENDC.value}\")\n exit()\n self.blender_modifiers()\n self.flip_normals()\n self.export_mesh()\n self.json_file.close()\n\n\n def initial_cleanup(self):\n \"\"\"\n Remove the default object of Blender\n \"\"\"\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects['Cube'].select_set(True)\n bpy.ops.object.delete()\n bpy.data.objects['Camera'].select_set(True)\n bpy.ops.object.delete()\n bpy.data.objects['Light'].select_set(True)\n bpy.ops.object.delete()\n\n\n def extract_mesh_data(self):\n \"\"\"\n Load graph data into python variables (from a dictionary)\n -Use [node_id][\"coordinates\"][\"x\"] or [\"y\"]\n \"\"\"\n verts, edges = [], []\n for i in self.data:\n verts.append([\n self.data[i][\"coordinates\"]['x'],\n self.data[i][\"coordinates\"]['y'],\n 0.0\n ])\n for child in self.data[i]['children']:\n edges.append([\n self.data[i]['id']-1,\n child-1\n ]) \n return verts, edges\n \n\n def load_mesh_in_blender(self, verts_p, edges_p):\n \"\"\"\n Load the verticies and edges in blender\n \"\"\"\n verts = verts_p\n edges = edges_p\n self.mesh = bpy.data.meshes.new('Underground')\n self.obj = bpy.data.objects.new('Underground', self.mesh)\n col = bpy.data.collections.get(\"Collection\")\n col.objects.link(self.obj)\n bpy.context.view_layer.objects.active = self.obj\n\n self.mesh.from_pydata(verts, edges, [])\n if not self.mesh:\n return -1\n\n\n def blender_modifiers(self):\n \"\"\"\n Create and apply modifiers\n \"\"\"\n mod_sub = bpy.ops.object.modifier_add(type='SUBSURF')\n mod_skin = self.obj.modifiers.new('Skin', 'SKIN')\n mod_sub = bpy.ops.object.modifier_add(type='SUBSURF')\n\n # Apply modifiers\n apply_mod = bpy.ops.object.modifier_apply(modifier='Subdivision')\n apply_mod = bpy.ops.object.modifier_apply(modifier='Skin') # Create a mesh skin arount the graph\n apply_mod = bpy.ops.object.modifier_apply(modifier='Subdivision.001')\n\n\n def flip_normals(self):\n \"\"\"\n Flip the normals\n \"\"\"\n bpy.ops.object.editmode_toggle()\n bpy.ops.mesh.select_all(action='SELECT') # Select all faces\n bpy.ops.mesh.flip_normals() # just flip normals\n\n\n def export_mesh(self):\n \"\"\"\n Export the mesh in the desired format\n \"\"\"\n bpy.data.objects['Underground'].select_set(True)\n\n # Make sure the directory exist\n if not 
os.path.exists(os.path.dirname(self.saved_mesh_path)):\n os.makedirs(os.path.dirname(self.saved_mesh_path))\n\n # Export the mesh\n bpy.ops.wm.obj_export(filepath=self.saved_mesh_path,\n export_selected_objects=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"PLUME project. Mesh generation based on a json file that provide data as x,y,z coordinates. Then based on the created structure, apply a circular skin around it to create the edges of the underground mesh.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n \n parser.add_argument(\"-index\", help=\"Index used for the path\", type=int)\n parser.add_argument(\"-name\", help=\"Name of the current graph generation\", type=str)\n parser.add_argument(\"--background\", action=\"store_true\", help=\"Run the script without GUI\")\n parser.add_argument(\"--python\", action=\"store_true\", help=\"Run blender with a python file\")\n parser.add_argument(\"file\", help=\"Path and name of the python file\")\n \n\n args = parser.parse_args()\n arguments = vars(args)\n generator = MeshGeneration(index_p=arguments['index'],\n generation_name_p=arguments['name'])","repo_name":"Gabryss/P.L.U.M.E","sub_path":"src/blender.py","file_name":"blender.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
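One subtlety worth noting: when Blender runs a script, sys.argv still contains Blender's own options, which is why the parser above declares --background, --python, and the file positional. A common alternative (a sketch, not what this script does) is to parse only the arguments after the -- separator, which Blender forwards untouched:

import sys

# Keep only the script's own arguments, i.e. everything after "--"
argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
print(argv)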
+{"seq_id":"29449481307","text":"# -*- coding: utf-8 -*-\n\nimport flask\nimport json\nimport route\nimport shutil\nimport datetime\nimport time\nimport os\nimport zipfile\n\nfrom sqlalchemy import and_\n\nfrom handler.log import api_logger\nfrom handler.config import appconfig\nfrom handler.pool import mysqlpool\nfrom handler.socket.deploy import single_deploy\n\nfrom model.mysql import model_mysql_planinfo\nfrom model.mysql import model_mysql_tablesnap\nfrom model.mysql import model_mysql_taskinfo\nfrom model.mysql import model_mysql_userinfo\nfrom model.redis import modle_redis_apitestplanworktable\n\n\"\"\"\n 新增正式测试任务创建接口,此任务仅允许本人创建\n 支持创建接口自动化测试以及接口性能测试任务\n ----校验\n 校验账户是否存在\n 校验账户操作令牌\n 校验账户所属角色是否有API操作权限\n 校验传参\n ----操作\n 检查测试计划以及测试版本是否存在且该测试版本是否为临时版本\n 新增调试任务\n 将测试任务数据打包发送给执行应用\n\"\"\"\n\n\n@route.check_token\n@route.check_user\n# @route.check_auth\n@route.check_post_parameter(\n ['planId', int, 1, None],\n ['description', str, None, 200],\n ['startType', int, 1, 2],\n ['runType', int, 1, 2]\n)\ndef task_post():\n # 初始化返回内容\n response_json = {\n \"code\": 200,\n \"msg\": \"操作成功\",\n \"data\": {}\n }\n\n # 取出数据\n # header\n user_id = flask.request.headers['UserId']\n # body\n plan_id = flask.request.json['planId']\n description = flask.request.json['description']\n start_type = flask.request.json['startType']\n run_type = flask.request.json['runType']\n\n # 如果startType为2则需要检查执行时间\n datetime_start_time = None\n datetime_end_time = None\n if start_type == 2:\n\n # 开始时间检查\n if 'startTime' not in flask.request.json:\n api_logger.debug(\"传参缺少startTime\")\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['startTime']) is not int:\n api_logger.debug(\"传参startTime类型错误\")\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['startTime'] < int(time.time()):\n api_logger.debug(\"传参startTime大小错误\")\n return route.error_msgs[201]['msg_too_early']\n # 结束时间检查\n if 'endTime' not in flask.request.json:\n api_logger.debug(\"传参缺少endTime\")\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['endTime']) is not int:\n api_logger.debug(\"传参endTime类型错误\")\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['endTime'] < flask.request.json['startTime'] + 10:\n api_logger.debug(\"传参endTime大小错误\")\n return route.error_msgs[201]['msg_task_time_error']\n try:\n datetime_start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(flask.request.json['startTime']))\n datetime_end_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(flask.request.json['endTime']))\n except:\n return route.error_msgs[201]['msg_data_error']\n # 如果runType为1则需要检查执行次数\n times = None\n if run_type == 1:\n print(1111111)\n if 'times' not in flask.request.json:\n return route.error_msgs[302]['msg_request_params_incomplete']\n elif type(flask.request.json['times']) is not int:\n return route.error_msgs[301]['msg_value_type_error']\n elif flask.request.json['times'] < 1:\n return route.error_msgs[201]['msg_data_error']\n times = flask.request.json['times']\n\n # 为了将来能够看日志,必须要有不变的快照数据,所以tableSnap的不靠谱\n # 尝试于redis读取工作台快照临时数据\n # 如果有,以这些内容发起测试任务\n # 如果无,则读取mysql中最新的内容,发起测试任务\n tablesnap_data = None\n redis_get_table_bytes = modle_redis_apitestplanworktable.query_table(plan_id)\n if redis_get_table_bytes is not None:\n tablesnap_data = redis_get_table_bytes.decode('utf-8')\n else:\n # 根据planId去查询工作台快照内容\n try:\n mysql_tablesnap = model_mysql_tablesnap.query.filter(\n and_(\n 
model_mysql_tablesnap.planId == plan_id,\n model_mysql_tablesnap.status == 1\n )\n ).first()\n api_logger.debug(\"接口测试计划工作台快照内容查找成功\")\n except Exception as e:\n api_logger.debug(\"接口测试计划工作台快照内容查找失败,失败原因:\" + repr(e))\n return route.error_msgs[500]['msg_db_error']\n else:\n # 如果查询出来存在记录并且为正式版本,则继续,否则返回错误信息\n # 需排除数据异常\n if not mysql_tablesnap:\n return route.error_msgs[201]['msg_no_data']\n else:\n tablesnap_data = mysql_tablesnap.table\n\n # 新增测试任务创建记录\n # 1.准备测试任务基础数据\n new_task_info = model_mysql_taskinfo(\n planId=plan_id,\n snap=tablesnap_data,\n taskType=1,\n startType=start_type,\n endType=run_type,\n taskDescription=description,\n createTime=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n createUser=user_id\n )\n # 剩余未填写项目 startTime/endTime/excuteTimes/if_error/vUser/rampUpPeriod\n # startTime/endTime\n if start_type == 1:\n new_task_info.startTime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n elif start_type == 2:\n new_task_info.startTime = datetime_start_time\n new_task_info.endTime = datetime_end_time\n # excuteTimes\n if run_type == 1:\n new_task_info.excuteTimes = times\n # if_error\n # 暂时不支持自定义\n new_task_info.errorType = 1\n # rampUpPeriod\n # 暂时不支持自定义\n new_task_info.rampUpPeriod = 0\n # vUser\n if 'userNum' in flask.request.json:\n normal_v_user = flask.request.json['userNum']\n if type(normal_v_user) is int and normal_v_user in range(1, 1001):\n new_task_info.vUser = normal_v_user\n else:\n return route.error_msgs[301]['msg_value_type_error']\n else:\n return route.error_msgs[302]['msg_request_params_incomplete']\n # 新增测试任务记录\n\n try:\n mysqlpool.session.add(new_task_info)\n mysqlpool.session.commit()\n except Exception as e:\n api_logger.error(\"新增测试任务失败,原因:\" + repr(e))\n return route.error_msgs[500]['msg_db_error']\n else:\n api_logger.debug(\"新增测试任务成功\")\n\n # 准备待发送的测试任务文件\n # 将工作台内容保存为task.json文件\n # 封装测试任务数据\n # 检查文件存放路径\n if not os.path.exists('file/'):\n api_logger.debug('存放测试任务文件的file主目录不存在,尝试创建...')\n try:\n os.makedirs('file/')\n\n except Exception as e:\n api_logger.error('存放测试任务文件的file目录创建失败,原因:' + repr(e))\n return route.error_msgs[500]['msg_file_error']\n else:\n\n api_logger.debug('存放测试任务文件的file目录创建成功')\n the_now = datetime.datetime.now()\n the_year = str(the_now.year)\n the_month = str(the_now.month)\n the_day = str(the_now.day)\n if not os.path.exists('file/' + the_year):\n api_logger.debug('年份目录不存在,尝试创建...')\n try:\n os.makedirs('file/' + the_year)\n except Exception as e:\n api_logger.error('年份目录创建失败,���因:' + repr(e))\n return route.error_msgs[500]['msg_file_error']\n else:\n api_logger.debug('年份目录创建成功')\n if not os.path.exists('file/' + the_year + '/' + the_month):\n api_logger.debug('月份目录不存在,尝试创建...')\n try:\n os.makedirs('file/' + the_year + '/' + the_month)\n except Exception as e:\n api_logger.error('月份目录创建失败,原因:' + repr(e))\n return route.error_msgs[500]['msg_file_error']\n else:\n api_logger.debug('月份目录创建成功')\n if not os.path.exists('file/' + the_year + '/' + the_month + '/' + the_day):\n api_logger.debug('日子目录不存在,尝试创建...')\n try:\n os.makedirs('file/' + the_year + '/' + the_month + '/' + the_day)\n except Exception as e:\n api_logger.error('日子目录创建失败,原因:' + repr(e))\n return route.error_msgs[500]['msg_file_error']\n else:\n api_logger.debug('日子目录创建成功')\n dir_path = 'file/' + the_year + '/' + the_month + '/' + the_day\n task_dir_path = dir_path + '/task_%s_%s' % (\n str(new_task_info.taskId),\n the_now.strftime('%Y%m%d%H%M%S')\n )\n api_logger.debug('尝试创建测试任务目标目录...')\n try:\n # 于file目录下创建 年/月/日/task_taskId_时间戳 文件夹\n 
os.makedirs(task_dir_path)\n # 将项目文件夹(其中为参数化文件)复制到task文件夹下\n resource_path = appconfig.get(\"task\", \"filePutDir\")\n resource_path = resource_path[:-1] if resource_path[-1] == \"/\" else resource_path\n resource_path = \"%s/%s\" % (resource_path, plan_id)\n # 根据配置文件中的路径,判断测试计划文件夹是否存在\n if os.path.exists(resource_path) is False or os.path.isdir(resource_path) is False:\n os.makedirs(task_dir_path + '/files')\n else:\n shutil.copytree(resource_path, task_dir_path + '/files')\n except Exception as e:\n api_logger.error('测试任务目标目录创建失败,原因:' + repr(e))\n return route.error_msgs[500]['msg_file_error']\n else:\n api_logger.debug('测试任务目标目录创建成功')\n # 将测试任务数据存为json文件\n file = open(task_dir_path + '/task.json', 'w', encoding='utf-8')\n file.write(tablesnap_data)\n file.close()\n # 将文件夹整个进行zip压缩\n z = zipfile.ZipFile(task_dir_path + '.zip', 'w', zipfile.ZIP_DEFLATED)\n # 将task.json/file添加入压缩包\n z.write(os.path.join(task_dir_path, 'task.json'), 'task.json')\n z.write(os.path.join(task_dir_path, 'files'), 'files')\n # 将file文件夹下所有文件添加入压缩包\n for dir_path, dir_names, file_names in os.walk(os.path.join(task_dir_path, 'files')):\n for fn in file_names:\n if fn not in z.NameToInfo:\n z.write(os.path.join(dir_path, fn), os.path.join('files', fn))\n z.close()\n\n # 查询计划类型\n try:\n mysql_planinfo = model_mysql_planinfo.query.filter(\n model_mysql_planinfo.planId == plan_id\n ).first()\n except Exception as e:\n api_logger.debug(\"model_mysql_planinfo数据读取失败,失败原因:\" + repr(e))\n return route.error_msgs[500]['msg_db_error']\n else:\n # 根据计划类型下发测试任务\n if mysql_planinfo.planType == 1:\n deploy_result, deploy_msg = single_deploy(\n base=new_task_info,\n file=task_dir_path + '.zip'\n )\n if not deploy_result:\n print(7777)\n response_json['code'] = 500\n response_json['error_msg'] = '测试任务下发失败,原因:%s,请联系管理员或稍后再发起测试任务' % deploy_msg\n return json.dumps(response_json)\n\n return response_json\n","repo_name":"erikshe2003/qaplatform_api","sub_path":"route/api/task/restful_task/post/v1_0_0.py","file_name":"v1_0_0.py","file_ext":"py","file_size_in_byte":12044,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
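The year/month/day directory ladder above can be collapsed with exist_ok, which removes the repeated existence checks. A compact sketch assuming the same file/ root:

import datetime
import os

now = datetime.datetime.now()
# One call creates any missing levels and ignores the ones that already exist
dir_path = os.path.join('file', str(now.year), str(now.month), str(now.day))
os.makedirs(dir_path, exist_ok=True)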
+{"seq_id":"18052222995","text":"from django import forms\n\nfrom django_summernote.widgets import SummernoteWidget\n\nfrom journal import models\n\nclass SubmissionForm(forms.ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SubmissionForm, self).__init__(*args, **kwargs)\n\t\tself.fields['subtitle'].required = False\n\t\tself.fields['manuscript_file'].required = False\n\n\tdef save(self, commit=True, request=None):\n\t\tarticle = super(SubmissionForm, self).save(commit=False)\n\t\tarticle.owner = request.user\n\n\t\tif commit:\n\t\t\tarticle.save()\n\n\t\treturn article\n\n\n\tclass Meta:\n\t\tmodel = models.Article\n\t\texclude = ('owner', 'authors', 'date_submitted', 'date_published', 'open_for_comments', 'doi')\n\t\twidgets = {\n\t\t\t'abstract': SummernoteWidget(),\n\t\t}\n\n\n","repo_name":"ajrbyers/panacea","sub_path":"src/journal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"27615641122","text":"import sys\nclass Sol2:\n # @param A : array of prices\n # @return an integer\n def maxProfit(self, price):\n maxprofit = 0\n globalmin = sys.maxint\n for i in range(len(price)):\n if price[i] < globalmin:\n globalmin = price[i]\n if (price[i] - globalmin) > maxprofit:\n maxprofit = price[i] - globalmin\n return maxprofit\n\nimport sys\nclass Buy2:\n # Strategy: trigger a buy whenever a minimum occurs\n # Tigger a sell whenever a maximum occurs\n def maxProfit(self, price):\n profit = 0\n curr_profit = 0\n curr_min = sys.maxint\n for i in range(len(price)):\n if price[i] < curr_min:\n curr_min = price[i]\n if (price[i] - curr_min) > curr_profit:\n curr_profit = price[i] - curr_min\n if (i > 1) and (price[i] < price[i-1]):\n # Sell at i-1, lock in profit!\n profit += curr_profit\n curr_profit = 0\n curr_min = price[i]\n # lock in any remaining profits\n profit += curr_profit\n return profit\n\n\ndef main():\n sol = Buy2()\n print(sol.maxProfit([1,2,3,0,2,7,3,3,3,4]))\nmain()\n\n","repo_name":"digimutts/xyz","sub_path":"buy.py","file_name":"buy.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15879267959","text":"import bpy\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom modules.mesh_test import SpecMeshTest, OperatorSpecEditMode, RunTest\n\n\ndef main():\n tests = [\n\n SpecMeshTest('Cubecube_intersect_union', 'Cubecube', 'Cubecube_result_1',\n [OperatorSpecEditMode('intersect_boolean',\n {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_intersect', 'Cubecube', 'Cubecube_result_2',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'INTERSECT', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_difference', 'Cubecube', 'Cubecube_result_3',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'DIFFERENCE', 'solver': 'FAST'}, 'FACE',\n {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_cut', 'Cubecube', 'Cubecube_result_4', [OperatorSpecEditMode('intersect',\n {'separate_mode': 'CUT', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_all', 'Cubecube', 'Cubecube_result_5',\n [OperatorSpecEditMode('intersect',\n {'separate_mode': 'ALL', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_none', 'Cubecube', 'Cubecube_result_6',\n [OperatorSpecEditMode('intersect',\n {'separate_mode': 'NONE', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n SpecMeshTest('Cubecube_intersect_select_none', 'Cubecube',\n 'Cubecube_result_7',\n [OperatorSpecEditMode('intersect',\n {'mode': 'SELECT', 'separate_mode': 'NONE', 'solver': 'FAST'}, 'FACE',\n {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, )]),\n SpecMeshTest('Cubecone_intersect_union', 'Cubecone', 'Cubecone_result_1',\n [OperatorSpecEditMode('intersect_boolean',\n {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {6, 7, 8, 9, 10}, )]),\n SpecMeshTest('Cubecones_intersect_union', 'Cubecones', 'Cubecones_result_1',\n [OperatorSpecEditMode('intersect_boolean', {'operation': 'UNION', 'solver': 'FAST'}, 'FACE', {0, 1, 2, 3, 4, 5}, )]),\n\n ]\n\n operator_test = RunTest(tests)\n\n command = list(sys.argv)\n for i, cmd in enumerate(command):\n if cmd == \"--run-all-tests\":\n operator_test.do_compare = True\n operator_test.run_all_tests()\n break\n elif cmd == \"--run-test\":\n name = command[i + 1]\n operator_test.do_compare = False\n operator_test.run_test(name)\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blender/blender","sub_path":"tests/python/boolean_operator.py","file_name":"boolean_operator.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"}
+{"seq_id":"74326920833","text":"\"\"\"\n@testcase\n@description 2.6 TP-PRE-TC-06: Frequency agility\n\n@tags\n POSITIVE\n\n@connection dummyPort = router\n\"\"\"\n\n#*****************************************************************************************\n#Defines section\n#*****************************************************************************************\nimport sys\nsys.path.append(scriptPath)\nfrom common import *\nfrom deviceScanner import *\nsys.path.remove(scriptPath)\n\n#*****************************************************************************************\n# Initialization\n#*****************************************************************************************\\\nportList = []\n\nconfigureCommunication()\n\ned1 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_END_DEVICE, portList)\ned2 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_END_DEVICE, portList)\nr1 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_ROUTER, portList)\nr2 = deviceScannerGetAssociatedPort(TEST_DEVICE_TYPE_ROUTER, portList)\n\n#*****************************************************************************************\n# Test preparation\n#*****************************************************************************************\\\nresetRouterToFN([r1, r2])\nresetEndDeviceToFN([ed1, ed2])\nclearPorts([r1, r2, ed1, ed2])\npowerOff([r1, r2, ed1, ed2])\n\ned1ExtAddr = getExtAddr(ed1)\nwriteLog(\"ED1 extended address - %016X\" % ed1ExtAddr)\n\nr1ExtAddr = getExtAddr(r1)\nwriteLog(\"R1 extended address - %016X\" % r1ExtAddr)\n\nr2ExtAddr = getExtAddr(r2)\nwriteLog(\"R2 extended address - %016X\" % r1ExtAddr)\n\nwriteLog(\"P1 Power on ED1 and R1\")\npowerOn([ed1, r1])\n\nwriteLog(\"P2 Initiate touchlink on ED1 with R1\")\ntouchlink(ed1, r1)\nidle([ed1, r1])\n\nwriteLog(\"P3 Power off ED1 and R1\")\npowerOff([ed1, r1])\n\n#*****************************************************************************************\n# Test body\n#*****************************************************************************************\\\nwriteLog(\"1a Power on ZR1\")\npowerOn([r1])\nwriteLog(\"1b ZR1 announces itself (Shall be checked by a sniffer\")\n\nwriteLog(\"2a Power on ED1\")\nsendCommand(ed1, resetCmd)\nwriteLog(\"2b,2c,2d ED1 rejoins its previous network (Shall be checked by a sniffer)\")\nreceiveAndCheck(ed1, connectedStr)\nidle([ed1, r1])\n\ncurrentChannel = getChannel(ed1)\nchannelMask = getChannelMask(ed1)\ntargetChannel = getNextChannel(channelMask, currentChannel)\ncheck(targetChannel != 0) # Channel mask shall contain more than one channel to pass this test\nwriteLog(\"Current channel: %d; channelMask: %08x\" % (currentChannel, channelMask))\nwriteLog(\"Target channel: %d\" % targetChannel)\n\nsleep(5)\n\nwriteLog(\"3 ED1 broadcasts MgmtNwkUpdateReq to all RxOnWhenIdle devices\")\nsendNwkMgmtUpdateReq(ed1, targetChannel, 0xFE, 0xFFFD)\nwriteLog(\"4 R1 changes its channel\")\nwriteLog(\"5a, 5b ED1 rejoins the network\")\nwriteLog(\"Disconnected indication on ED1\")\nreceiveAndCheck(ed1, disconnectedStr)\nwriteLog(\"Connected indication on ED1\")\nreceiveAndCheck(ed1, connectedStr)\n","repo_name":"binaryArrow/airquality","sub_path":"BitCloud_Dateien/Evaluation Tools/ZLL_Scripts/2.7.py","file_name":"2.7.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36915740401","text":"import subprocess\nimport sys\nimport traceback\n\n\ndef runWithStdoutSync(args):\n try:\n # fail, no quoted: consul agent -server -data-dir=\"/tmp/consul\" -bootstrap-expect 3 -bind=192.168.4.108 -client=192.168.4.108\n # If passing a single string, either shell must be True (see below) or else the string must simply name the program to be executed without specifying any arguments.\n # remove universal_newlines=True\n # multiple param\n print(\"Start process Now: \" + \" \".join(args))\n\n # OSError: [Errno 8] Exec format error 可能是可执行文件不存在,导致报错.\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, bufsize=1)\n\n with proc.stdout as out:\n while True:\n line = out.readline()\n if line != b\"\":\n line = line.strip().decode(\"utf8\")\n if line != \"\":\n # OnStart: Found error: 'ascii' codec can't encode character '\\xb5' in position 66: ordinal not in range(128)\n print(line.encode(\"utf8\").decode(\"utf8\"))\n else:\n # print(\"End of stdout, will break out loop...\")\n break\n\n proc.wait()\n #print(\"Exit code:\"+str(proc.returncode))\n return proc\n\n except subprocess.CalledProcessError as err:\n print(\"Found CalledProcessError:\", err, err.output)\n print(traceback.format_exc())\n print(\"Will kill subprocess and exit now...\")\n\n proc.kill()\n proc.wait()\n sys.exit(1)\n\n except Exception as err:\n print(\"Found error:\", err)\n print(traceback.format_exc())\n print(\"Will kill subprocess and exit now...\")\n\n proc.kill() #UnboundLocalError: local variable 'proc' referenced before assignment\n proc.wait()\n sys.exit(1)\n\n\n# ImportError: No module named cluster.utils\n# see readme.md set PYTHONPATH\nif __name__ == \"__main__\":\n runWithStdoutSync([\"date\"])\n runWithStdoutSync([\"ls\", \"-l\"])\n runWithStdoutSync([\"consul\", \"agent\", \"-server\"])\n\n # will exit\n # runWithStdoutSync([\"fdfdss\"])\n\n runWithStdoutSync([\"consul\", \"agent\", \"-server\",\n \"-data-dir=/tmp/consul\",\n \"-bootstrap-expect\", \"3\",\n \"-bind=192.168.4.108\",\n \"-node=consulTmpTest1-172.17.0.3\",\n \"-client=192.168.4.108\"])\n","repo_name":"hanhui870/zooinit","sub_path":"script/subcall/runcmd.py","file_name":"runcmd.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"28375786702","text":"# put your python code here\nstart = int(input())\nend = int(input()) + 1\nmultiples = []\n\nfor number in range(start, end):\n if number % 3 == 0:\n multiples.append(number)\n\nprint(sum(multiples) / len(multiples))\n","repo_name":"bitzsalex/PythonCoreJetBrains","sub_path":"Coffee Machine/Topics/For loop/The average of all numbers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37193211331","text":"# Given an unsorted array of integers nums, return the length of the longest consecutive elements sequence.\n# You must write an algorithm that runs in O(n) time.\n\ndef longestConsecutive(nums):\n longest, numarray = 0, set(nums)\n\n for num in numarray:\n if num - 1 not in nums:\n length = 1\n while num+length in numarray:\n length += 1\n longest = max(longest, length)\n\n return longest\n","repo_name":"ManeeshBusi/Neetcode150","sub_path":"1-Arrays&Hashing/9-LongestConsecutiveSequence.py","file_name":"9-LongestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3482749906","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\n# choices are used in the form, but not enforced at the model level\nEVENT_TYPES = [(v,v) for v in (\n ('Aerial Bombardment'),\n ('Artillery Bombardment'),\n ('Attack: IED'),\n ('Attack: Mine'),\n ('Attack: Small Arms'),\n ('Attack: Mechanized'),\n ('Attack on Vehicles'),\n ('Armed Incursion'),\n ('Troop Movement'),\n ('Civilian Displacement'),\n)]\n\nEVIDENCE_SOURCES = [(v,v) for v in (\n ('Confidential Source'),\n ('Satellite Image'),\n ('NGO Report'),\n ('UN Report'),\n ('Media Report'),\n ('Government Report'),\n ('HSBA - Small Arms Survey'),\n ('SVM (Sudan Vote Monitor)'),\n)]\n\nACTORS = [(v,v) for v in (\n ('SAF (Sudan Armed Forces)'),\n ('NGO (Non-Governmental Organization)'),\n ('SPLA/GOSS (Sudan\\'s People Liberation Army/Government of South Sudan)'),\n ('UNMIS (United Nations Mission In Sudan)'),\n ('Militia - Specify militia'),\n ('PDF (Popular Defense Force)'),\n)]\n\nclass Event(models.Model):\n summary = models.CharField(max_length=255, verbose_name=\"Summary\", help_text=\"Short summary of event (one phrase or sentence)\")\n type = models.CharField(max_length=255, choices=EVENT_TYPES, verbose_name=\"Event Type\")\n date = models.DateField(verbose_name=\"Event Date\", help_text=\"Date on which the event occurred\")\n location = models.CharField(max_length=255, verbose_name=\"Location\", help_text=\"Where the event occurred\")\n lat = models.FloatField(blank=True, verbose_name=\"Latitude\")\n lon = models.FloatField(blank=True, verbose_name=\"Longitude\")\n actor = models.CharField(max_length=255, choices=ACTORS, help_text=\"The group or persons responsible for the event\")\n population = models.CharField(max_length=255, verbose_name=\"Affected Population\", \n help_text='Specify \"Internally Displaced Persons\", \"Residents of [Town]\", a particular military unit, etc')\n notes = models.TextField(blank=True, verbose_name=\"Notes\")\n logger = models.ForeignKey(User)\n \n class Meta:\n ordering = ['-date']\n \n def __unicode__(self):\n return self.name\n \n def get_absolute_url(self):\n return reverse('event_view', kwargs={\"id\": self.id})\n\nclass Evidence(models.Model):\n summary = models.CharField(max_length=255, verbose_name=\"Summary\", help_text=\"Short summary of evidence (one phrase or sentence)\")\n event = models.ForeignKey(Event, verbose_name=\"Related Event\", help_text=\"The event to which this evidence is related, if known\")\n source = models.CharField(max_length=255, choices=EVIDENCE_SOURCES, verbose_name=\"Source\")\n confidential_id = models.CharField(max_length=255, verbose_name=\"Confidential Source ID\")\n confidential_link = models.CharField(max_length=255, verbose_name=\"Confidential Source Link/File Number\")\n source_link = models.CharField(max_length=255, verbose_name=\"Source Link\", help_text=\"URL of source report or image\")\n # do we need a file upload option here?\n notes = models.TextField(blank=True, verbose_name=\"Notes\")\n logger = models.ForeignKey(User)\n ","repo_name":"nrabinowitz/ssp_tracker","sub_path":"tracker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23626427671","text":"#!/usr/bin/python\n\n\ndef cread(fd):\n return fd.readline().strip('\\n')\n\n\nBLUE = \"#\"\n\nRED = \"/\\\\\"\n\ndef solve(fd):\n\n R, C = [ int(x) for x in cread(fd).split() ]\n\n Tiles = [None]*R\n for i in xrange(R):\n Tiles[i] = list(cread(fd))\n\n # Lests go row by row\n for i in xrange(R):\n for j in xrange(C):\n if Tiles[i][j]==BLUE:\n # Cover it!\n if i==R-1 or j==C-1:\n # We are at a border :(\n return None\n if ( Tiles[i][j+1]!=BLUE or\n Tiles[i+1][j]!=BLUE or\n Tiles[i+1][j+1]!=BLUE):\n return None\n Tiles[i][j] = \"/\"\n Tiles[i][j+1] = \"\\\\\"\n Tiles[i+1][j] = \"\\\\\"\n Tiles[i+1][j+1] = \"/\"\n return Tiles\n\n\nimport sys\n\nif len(sys.argv)<2:\n fd = sys.stdin\nelse:\n fd = open(sys.argv[1], 'r')\n\nT = int(cread(fd))\n\nfor i in xrange(T):\n sol = solve(fd)\n if sol is None:\n txt = \"\\nImpossible\"\n else:\n txt = \"\"\n for row in sol:\n txt += \"\\n\" + ''.join(row)\n print(\"Case #%d:\" % (i+1) + txt)\n \nfd.close()\n \n \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_84/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23579183201","text":"#!/usr/bin/env python3\n\nfrom sys import stdin\n\ndef solve(ifs):\n D, N = [int(v) for v in ifs.readline().strip().split(' ')]\n horses = [(float(v[0]), float(v[1])) \n for v in (ifs.readline().strip().split(' ') for i in range(N))]\n t_reach = [(D - h[0]) / h[1] for h in horses]\n #print(t_reach)\n\n return str(D / max(t_reach))\n\nif __name__ == '__main__':\n\tT = int(stdin.readline())\n\t#print(T, 'cases to evaluate')\n\tfor i in range(T):\n\t\tresult = solve(stdin)\n\t\tprint('Case #' + str(i + 1) + ': ' + result)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1467.py","file_name":"1467.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7894382247","text":"project = 'aaindexer'\ncopyright = '2022, Michael Milton'\nauthor = 'Michael Milton'\nrelease = '0.1.0'\nextensions = [\n 'sphinxcontrib.restbuilder',\n 'sphinx.ext.autodoc',\n 'sphinx_click'\n]\nautodoc_typehints = \"description\"\nautodoc_class_signature = \"separated\"\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']\nhtml_theme = 'alabaster'\nhtml_static_path = ['_static']\n","repo_name":"multimeric/aaindexer","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43258391862","text":"import unicodedata\nimport re\nimport json\n\nimport nltk\nfrom nltk.tokenize.toktok import ToktokTokenizer\nfrom nltk.corpus import stopwords\n\nimport pandas as pd\n\ndef basic_clean(string):\n string = string.lower()\n string = (unicodedata.normalize('NFKD', string)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n )\n string = re.sub(r\"[^a-z0-9'\\s]\", '', string)\n return string\n\ndef tokenize(string):\n tokenizer = nltk.tokenize.ToktokTokenizer()\n return tokenizer.tokenize(string, return_str=True)\n\ndef stem(string):\n ps = nltk.porter.PorterStemmer()\n stems = [ps.stem(word) for word in string.split()]\n return ' '.join(stems)\n\ndef lemmatize(string):\n wnl = nltk.stem.WordNetLemmatizer()\n lemmas = [wnl.lemmatize(word) for word in string.split()]\n return ' '.join(lemmas)\n\ndef remove_stopwords(string, extra_words=[], exclude_words=[]):\n stopword_list = stopwords.words('english')\n \n for word in extra_words:\n stopword_list.append(word)\n \n for word in exclude_words:\n stopword_list.remove(word)\n \n words = string.split()\n filtered_words = [word for word in words if word not in stopword_list]\n return ' '.join(filtered_words)\n\ndef prepare_article_data(df, column):\n clean_tokens = df[column].apply(basic_clean).apply(tokenize)\n df['stemmed'] = clean_tokens.apply(stem)\n df['lemmatized'] = clean_tokens.apply(lemmatize)\n df['clean'] = clean_tokens.apply(remove_stopwords)\n return df","repo_name":"RyanMcCall/natural-language-processing","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8060624317","text":"from fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nimport sentry_sdk\nfrom sentry_sdk.integrations.starlette import StarletteIntegration\nfrom sentry_sdk.integrations.fastapi import FastApiIntegration\n\nfrom src.driver.session import SessionManager\nfrom src.routers import (\n user_router,\n product_router,\n predictor_router,\n)\nfrom .di import injector\nfrom .exceptions import BaseException\nfrom .constants import (\n SENTRY_DSN,\n APP_ENV,\n)\n\napp = FastAPI()\n\nsentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n StarletteIntegration(),\n FastApiIntegration(),\n ],\n environment=APP_ENV,\n send_default_pii=True,\n attach_stacktrace=True,\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production,\n traces_sample_rate=0.5,\n)\n\napp.include_router(user_router)\napp.include_router(product_router)\napp.include_router(predictor_router)\napp.add_middleware(\n CORSMiddleware,\n allow_origins = [\"*\"],\n allow_credentials=True,\n allow_methods=['*'],\n allow_headers=['*'],\n)\n\n\n@app.on_event(\"startup\")\ndef startup():\n client = injector.get(SessionManager).get_client()\n print(\"[Startup] Connecting to the MongoDB database ..\")\n print(client.server_info())\n print(\"[Startup] Connected to the MongoDB database!\")\n\n\n@app.on_event(\"shutdown\")\ndef shutdown():\n client = injector.get(SessionManager).get_client()\n print(\"[Shutdown] Disconnecting from the MongoDB database ..\")\n client.close()\n print(\"[Shutdown] Disconnected from the MongoDB database!\")\n\n\n@app.exception_handler(Exception)\ndef unicorn_exception_handler(request: Request, exc: BaseException):\n code = getattr(exc, \"code\", 500)\n message = getattr(exc, \"message\", str(exc))\n print(code, message)\n return JSONResponse(\n status_code=code,\n content=message,\n )\n\n\n@app.get(\"/ping\")\nasync def ping():\n return {\"message\": \"pong\"}\n","repo_name":"devcamp18/group18-monorepo","sub_path":"backend/src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3893120230","text":"#!/usr/bin/python\n\nimport codecs\n\n\nINDENT = ' ' * 4\n\n\nH_SRC = \"\"\"\n#ifndef CC_PREPROCESS_DESTUTTERER_DATA_H_\n#define CC_PREPROCESS_DESTUTTERER_DATA_H_\n\n#include \n#include \n#include \n\nnamespace destutterer_data {\n\nextern uint32_t DIGITS[];\n\nextern size_t DIGITS_SIZE;\n\n} // namespace destutterer_data\n\n#endif // CC_PREPROCESS_DESTUTTERER_DATA_H_\n\"\"\"[1:]\n\n\nCC_SRC = \"\"\"\n#include \"destutterer_data.h\"\n\nnamespace destutterer_data {\n\nuint32_t DIGITS[] = {\n%s\n};\n\nsize_t DIGITS_SIZE = sizeof(DIGITS) / sizeof(DIGITS[0]);\n\n} // namespace destutterer_data\n\"\"\"[1:]\n\n\ndef parse_code_point(s):\n assert s.startswith('U+')\n return int(s[2:], 16)\n\n\ndef get(file_name):\n code2name = {}\n with codecs.open(file_name, encoding='utf-8') as f:\n for line in f:\n ss = line.split()\n code = parse_code_point(ss[0])\n name = ' '.join(ss[1:-1])\n value = ss[-1]\n assert code not in code2name\n code2name[code] = name\n return code2name\n\n\ndef dump_h(file_name):\n with open(file_name, 'wb') as f:\n f.write(H_SRC)\n\n\ndef dump_cc(code2name, file_name):\n with open(file_name, 'wb') as f:\n lines = []\n for code in sorted(code2name):\n name = code2name[code]\n lines.append('%s0x%x, // %s' % (INDENT, code, name))\n f.write(CC_SRC % '\\n'.join(lines))\n\n\ndef main():\n dump_h('../../destutterer_data.h')\n\n code2name = get('digits.txt')\n dump_cc(code2name, '../../destutterer_data.cc')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"knighton/phraser","sub_path":"phraser/cc/preprocess/data_import/destutter/generate_destutterer_data.py","file_name":"generate_destutterer_data.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23499314411","text":"import fileinput\r\n\r\ndef pan_cake(Cake):\r\n\r\n count = 0\r\n for i in range(len(Cake)):\r\n if Cake[len(Cake)-1-i] == 0:\r\n for j in range(len(Cake)-1-i):\r\n if Cake[j] == 1:\r\n Cake[j] = 0\r\n else:\r\n Cake[j] = 1\r\n count += 1\r\n \r\n return count\r\n\r\nif __name__ == \"__main__\":\r\n\r\n input_file = open(\"B-large.in\")\r\n output_file = open(\"B-large.out\",\"w\")\r\n\r\n T = int(input_file.readline())\r\n #T = input()\r\n result_array = []\r\n \r\n for i in range(T):\r\n Input = input_file.readline()\r\n #Input = raw_input()\r\n Cake = []\r\n for j in range(len(Input)):\r\n if Input[j] == \"+\":\r\n Cake.append(1)\r\n elif Input[j] == \"-\":\r\n Cake.append(0)\r\n result = pan_cake(Cake)\r\n result_array.append(result)\r\n \r\n for i in range(T):\r\n #print(\"Case #\"+str(i+1)+\": \" + str(result_array[i]))\r\n output_file.write(\"Case #\"+str(i+1)+\": \" + str(result_array[i]) + \"\\n\")\r\n output_file.close()\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/3855.py","file_name":"3855.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41669755877","text":"from quoridor import Quoridor\nimport copy\nimport sys, time, random\nimport numpy as np\n\ndef record_time(func):\n def dec(*args, **kw):\n start = time.time()\n ret = func(*args, **kw)\n end = time.time()\n print('call %s():' % func.__name__, end=' ')\n print('spend time:', end - start, 's')\n return ret\n return dec\n\nINF = 100000000\n\ndef evaluate_fn(board):\n oppo_dis = board.oppo_distance()\n self_dis = board.self_distance()\n # self_walls = board.wall_remaining[1]\n # oppo_walls = board.wall_remaining[-1]\n return 100 * (oppo_dis - self_dis) # + int(random.random() * 100)\n # return 0\n\ndef _minimax(board, depth, alpha, beta, max_layer):\n if depth == 0:\n return evaluate_fn(board), None\n end, _ = board.check_end()\n if end:\n return evaluate_fn(board), None\n\n if max_layer:\n max_eva = -INF\n valids = board.valid_actions()\n m_action = None\n for action in valids:\n board_copy = copy.deepcopy(board)\n board_copy.take_action(action)\n eva = _minimax(board_copy, depth-1, alpha, beta, False)[0]\n if eva > max_eva:\n max_eva = eva\n m_action = action\n alpha= max(alpha, max_eva)\n if beta <= alpha:\n break\n return max_eva, m_action\n else:\n min_eva = INF\n board.alter()\n valids = board.valid_actions()\n m_action = None\n for action in valids:\n board_copy = copy.deepcopy(board)\n board_copy.take_action(action)\n board_copy.alter()\n eva = _minimax(board_copy, depth-1, alpha, beta, True)[0]\n if eva < min_eva:\n min_eva = eva\n m_action = action \n beta = min(beta, eva)\n if beta <= alpha:\n break\n return min_eva, m_action\n\ndef minimax(board, depth):\n '''\n 输入一个Quoridor棋盘对象\n 输出当前玩家的决策\n '''\n return _minimax(board, depth=depth, alpha=-INF, beta=INF, max_layer=True)[1]\n\ndef create_board(self_loc, oppo_loc, self_walls, oppo_walls, walls):\n q = Quoridor()\n q._self_loc = self_loc\n q._oppo_loc = oppo_loc\n q.wall_remaining[1] = self_walls\n q.wall_remaining[-1] = oppo_walls\n q._walls[:] = walls[:]\n return q\n\ndef main():\n board = create_board(3, 45, 8, 8, np.zeros(36))\n action = minimax(board, 2)\n print('action = ', action)\n board.print_board()\n board.take_action(action)\n board.print_board()\n print(action)\n\ndef interpret(action):\n if action[0] == 'h':\n row = int(action[1])\n col = int(action[2])\n return 16 + row * 6 + col\n elif action[0] == 'v':\n row = int(action[1])\n col = int(action[2])\n return 16 + 36 + row * 6 + col\n else:\n return int(action)\n\ndef game():\n counter = 0\n depth = 1\n q = Quoridor()\n while True:\n q.print_board()\n print(q.valid_actions())\n action = input('please input action code:')\n action = interpret(action)\n q.take_action(action)\n end, winner = q.check_end()\n if end:\n print('Game over, winner is ', winner)\n break\n\n q.alter()\n counter += 1\n if counter > 12:\n depth = 2\n action = minimax(q, depth)\n q.take_action(action)\n end, winner = q.check_end()\n if end:\n print('Game over, winner is ', winner)\n break\n q.alter()\n\n\nif __name__ == '__main__':\n game()","repo_name":"xzymustbexzy/alpha_quoridor","sub_path":"game/minimax_player.py","file_name":"minimax_player.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12948321908","text":"from urlparse import urlparse\nimport httplib, sys, multiprocessing\nimport os\nimport signal\nimport json\nimport time\nfrom datetime import datetime\nimport logging\nimport logging.config\nfrom monascaclient import client\n\nkeystone = {\n 'username': 'mini-mon',\n 'password': 'password',\n 'project': 'test',\n 'auth_url': 'http://192.168.10.5:35357/v3'\n}\n\n# Run as \"python agent_simulator.py\n\nRNDD_KAFKA0002 = 'http://127.0.0.1:8080/v2.0/metrics'\nMINI_MON = 'http://192.168.10.4:8080/v2.0/'\n\n# select which API URL to use\napi_url = MINI_MON\n\n# num_process x num_requests will be the number of http connections. \n# beware that 20,000 connections will cause too many ephemeral ports used\n# on a single api server (with one ipaddress). Would recommend not greater than 1000\nnum_processes = 1\n\n# number of requests sent per interval (normally 1-20 max if doing continuous)\nnum_requests = 2\n\n# the agent sends anywhere between 40-360 metrics per request\nnum_metrics_per_request = 100\n\n# (for continuous) The seconds to wait to send metrics. valid range 1-60 (lowest recommended is 10 by the agent)\nagent_interval = 60\n\n# when False runs once, when True runs continuously sending num_requests every interval.\ncontinuous = False\n\nlog = logging.getLogger(__name__)\n\n\nprocessors = [] # global list to facilitate clean signal handling\nexiting = False\n\nclass MetricCreatorSimple():\n \"\"\" Generates metrics\n \"\"\"\n def __init__(self, proc_num):\n self.proc_num = proc_num\n self.num_calls = 0\n self.start_time = int((time.time() - 120)*1000)\n\n def create_metric(self):\n metric = {\"name\": \"cube\" + str(self.proc_num),\n \"dimensions\": {\"hostname\": \"server-\" + str(self.proc_num)},\n \"timestamp\": self.start_time+self.num_calls,\n \"value\": self.num_calls}\n self.num_calls += 1\n return metric\n\nclass agent_sim_process():\n \"\"\"Simulate a monasca agent\n arguments\n proc_num - identifying number for the agent\n num_requests - how many requests the agent makes per interval\n num_metrics - how many metrics are in each request\n continuous - run once or forever\n queue - (multiprocessing.Queue) if provided, agent will use to report number of metrics sent\n metric_creator - agent will call \"create_metric\" method from this object and will pass in proc_num\n token - what token should the agent use, will generate its own token if none provided\n\n The process will report the number of metrics for each batch request to the q, it will also send exceptions\n it encounters. 
If no queue is provided, it will print these instead.\n \"\"\"\n def __init__(self, proc_num, num_requests, num_metrics, api_url, keystone_dict, continuous=False, interval=60, queue=None,\n metric_creator=MetricCreatorSimple, token=None):\n self.proc_num = proc_num\n self.num_requests = num_requests\n self.num_metrics = num_metrics\n self.interval = interval\n self.continuous = continuous\n self.queue = queue\n if not token:\n try:\n token = ksclient.KSClient(**keystone_dict).token\n except Exception as ex:\n print(\"Agent {}: Failed to get auth token from keystone\\n{}\".format(self.proc_num, keystone_dict))\n #print(\"Using token: \" + token)\n self.mon_client = client.Client('2_0', api_url, session=token)\n self.metric_creator = metric_creator(proc_num)\n #print(\"Created agent {}\".format(self.proc_num))\n\n def do_work_continuously(self):\n while True:\n start_send = time.time()\n for x in xrange(self.num_requests):\n self.post_metrics()\n end_send = time.time()\n\n secs = end_send - start_send\n if secs < self.interval:\n sleep_interval = self.interval - secs\n else:\n sleep_interval = 0\n #print (\"send seconds %f took longer than interval %f, not sleeping\" % (secs, self.interval))\n #print (\"send time = %f, sleep time = %f\" % (secs, sleep_interval) )\n time.sleep(sleep_interval)\n \n def do_work_once(self):\n start_send = time.time()\n for x in xrange(self.num_requests):\n self.post_metrics()\n end_send = time.time()\n secs = end_send - start_send\n #print (\"send time in seconds = %f\" % (secs))\n\n def post_metrics(self):\n try:\n body = []\n for i in xrange(self.num_metrics):\n body.append(self.metric_creator.create_metric())\n self.mon_client.metrics.create(jsonbody=body)\n if self.queue:\n self.queue.put(self.num_metrics)\n except Exception as ex:\n if self.queue:\n self.queue.put(ex)\n else:\n print(ex)\n\n def run(self):\n if self.continuous:\n self.do_work_continuously()\n else:\n self.do_work_once()\n\n\ndef clean_exit(signum, frame=None):\n \"\"\"\n Exit all processes attempting to finish uncommited active work before exit.\n Can be called on an os signal or no zookeeper losing connection.\n \"\"\"\n global exiting\n if exiting:\n # Since this is set up as a handler for SIGCHLD when this kills one child it gets another signal, the global\n # exiting avoids this running multiple times.\n log.debug('Exit in progress clean_exit received additional signal %s' % signum)\n return\n\n log.info('Received signal %s, beginning graceful shutdown.' 
% signum)\n    exiting = True\n\n    for process in processors:\n        try:\n            if process.is_alive():\n                process.terminate()\n        except Exception:\n            pass\n\n    # Kill everything, that didn't already die\n    for child in multiprocessing.active_children():\n        log.debug('Killing pid %s' % child.pid)\n        try:\n            os.kill(child.pid, signal.SIGKILL)\n        except Exception:\n            pass\n\n    sys.exit(0)\n\n\nif __name__ == '__main__':\n    print (\"continuous = %d\") % continuous\n    print (\"using URL: %s\") % api_url\n    print (\"num_process = %d\" % num_processes)\n    print (\"num_metrics_per_request = %d\" % num_metrics_per_request)\n    print (\"num requests (sent per interval if continuous) = %d\") % num_requests\n    print (\"interval (secs) = %d\" % agent_interval)\n    print (\"total metrics sent (per interval) = %d\" % (num_processes * num_requests * num_metrics_per_request))\n    print (\"total connections (per interval) = %d\" % (num_processes * num_requests))\n\n    log.info('num_processes %d', num_processes)\n    for x in xrange(0, num_processes): \n        # NOTE: the keystone dict must be passed before the continuous flag to match\n        # the agent_sim_process.__init__ signature (..., api_url, keystone_dict, continuous, interval)\n        p = multiprocessing.Process(\n            target=agent_sim_process(x, num_requests, num_metrics_per_request, api_url, keystone,\n                                     continuous, agent_interval).run\n        )\n        processors.append(p)\n\n    ## Start\n    try:\n        log.info('Starting processes')\n        print ('Starting processes %s' % str(datetime.now()))\n        start = time.time()\n        for process in processors:\n            process.start()\n\n        # The signal handlers must be added after the processes start otherwise they run on all processes\n        signal.signal(signal.SIGCHLD, clean_exit)\n        signal.signal(signal.SIGINT, clean_exit)\n        signal.signal(signal.SIGTERM, clean_exit)\n\n        log.info('calling Process.join() ')\n        for process in processors:\n            process.join()\n        end = time.time()\n        print (\"runtime = %d seconds\" % (end - start))\n    except Exception:\n        print ('Error! Exiting.')\n        for process in processors:\n            process.terminate()\n        end = time.time()\n        print (\"runtime = %d seconds\" % (end - start))\n","repo_name":"monasca/monasca-perf","sub_path":"monasca_perf/agent_sim.py","file_name":"agent_sim.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"20357421311","text":"import pandas as pd\nimport os\nimport json\n\nfile_data = list()\nfolders = ['autopytorch_thesis_final_2', 'autopytorch_thesis_final_others_2', 'autopytorch_thesis_final_ensemble_opt_2'] # , 'autopytorch_thesis']\nfor folder in folders:\n for root, dirs, files in os.walk(f'/work/ws/nemo/fr_rk250-{folder}-0/small_tasks/'):\n for file in files:\n if 'final_result.json' in file:\n content = pd.read_json(os.path.join(root, file), typ='series')\n file_data.append(content)\n\npd.DataFrame(file_data).to_csv('gathered_results_proper.csv')","repo_name":"ravinkohli/thesis_experiments","sub_path":"gather_results.py","file_name":"gather_results.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29101919985","text":"\"\"\"\r\nCOMP.CS.100 Programming 1\r\nCode Template\r\n\"\"\"\r\n\r\ndef row_encryption(text):\r\n \"\"\"\r\n Encrypts its parameter letter by letter using ROT13 encryption technology.\r\n\r\n :param text: str, string to be encrypted\r\n :return: str, parameter encrypted using ROT13\r\n \"\"\"\r\n encrypted_text = \"\"\r\n for char in text:\r\n encrypted_text += encrypt(char)\r\n return encrypted_text\r\n\r\n\r\ndef encrypt(char):\r\n \"\"\"\r\n Encrypts its parameter using ROT13 encryption technology.\r\n\r\n :param char: str, character to be encrypted\r\n :return: str, parameter encrypted using ROT13\r\n \"\"\"\r\n\r\n regular_chars = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\",\r\n \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\",\r\n \"w\", \"x\", \"y\", \"z\"]\r\n\r\n encrypted_chars = [\"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\",\r\n \"y\", \"z\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\",\r\n \"j\", \"k\", \"l\", \"m\"]\r\n\r\n if char.lower() in regular_chars:\r\n char_index = regular_chars.index(char.lower())\r\n if char.isupper():\r\n return encrypted_chars[char_index].upper()\r\n else:\r\n return encrypted_chars[char_index]\r\n else:\r\n return char\r\n\r\n\r\ndef read_message():\r\n \"\"\"This function does this and this more than this jaada jaada\"\"\"\r\n message = []\r\n \r\n line = input()\r\n while line:\r\n message.append(line)\r\n line = input()\r\n\r\n \r\n return message\r\n\r\ndef main():\r\n print(\"Enter text rows to the message. Quit by entering an empty row.\")\r\n message = read_message()\r\n print(\"regular form: \", message)\r\n print(\"Suing repr()\", repr(message))\r\n print(\"ROT13:\")\r\n for line in message:\r\n print(row_encryption(line))\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Karamiii/PythonClass","sub_path":"6.Kierros/ROT13-Kokonainen_viesti.py","file_name":"ROT13-Kokonainen_viesti.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2749950865","text":"import asyncio\nimport os\n\nimport discord\nimport pymongo\nfrom discord.ext import commands\nfrom pymongo import MongoClient\n\nfrom __main__ import logger\n\ncluster = MongoClient(os.getenv('MONGO_CONNECTION_URL'))\n\ndb = cluster['UserData']\n\ncollection = db['UserData']\n\n\nclass Db(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f'{self.__class__.__name__} Cog has been loaded\\n-----')\n\n def post(post_data: dict):\n mq = {\"_id\": post_data['_id']}\n if (collection.count_documents(mq) == 0):\n collection.insert_one(post_data)\n else:\n q = {\"_id\": post_data['_id']}\n user = collection.find(q)\n for r in user:\n score = r['score']\n score = score + 1\n test = post_data['is_admin']\n collection.update_one({\"_id\": post_data['_id']}, {\"$set\": {\"score\": score}})\n collection.update_one({\"_id\": post_data['_id']}, {\"$set\": {\"is_admin\": test}})\n logger.info(f'Posted data to DB! Data: \"{post_data}\"')\n\n\ndef setup(bot):\n bot.add_cog(Db(bot))\n","repo_name":"RandomRuskiy/brads-server-bot","sub_path":"cogs/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28056575221","text":"import sqlite3\n\nimport pygame.font\n\nfrom commonFunctions import print_text\nfrom commonSettings import *\n\n\ndef db_create():\n\n pass\n\n\ndef db_get_players_scores(level):\n \"\"\"returns table: player_id, name, prev score, record score\"\"\"\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"\"\"\n SELECT p.player_id, p.player_name, rs.score, pls.record_score, MAX(rs.round_id) FROM player p\n LEFT JOIN player_level_scores pls ON p.player_id = pls.player_id AND pls.level_id = ?\n LEFT JOIN round_scores rs ON p.player_id = rs.player_id AND rs.level_id = ?\n GROUP BY p.player_name\n ORDER BY p.player_id ASC\n \"\"\".format(table='t')\n cursor.execute(sql, (level, level))\n db_result = cursor.fetchall()\n #print(\"DB RESULT: \",db_result)\n conn.close()\n return db_result\n\ndef db_get_player_name(id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"SELECT player_name FROM player WHERE player_id = ?\"\n cursor.execute(sql, (id, ))\n db_result = cursor.fetchall()\n conn.close()\n return db_result[0][0]\n\ndef db_get_level_name(id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = \"SELECT level_name FROM game_levels WHERE gamelevel_id = ?\"\n cursor.execute(sql, (id, ))\n db_result = cursor.fetchall()\n conn.close()\n return db_result[0][0]\n\n\ndef db_add_round_result(player, level, score):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql='INSERT INTO round_scores (player_id, level_id, score) VALUES('+str(player)+', '+str(level)+', '+str(score)+')'\n cursor.execute(sql)\n conn.commit()\n #db_result = cursor.fetchall()\n\n print(\"DB ROUND RESULT UPDATED: \", str(score))\n conn.close()\n\n\ndef db_update_new_record(player, level, score):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n\n sql = 'SELECT record_score FROM player_level_scores WHERE player_id = '+str(player)+' AND level_id = '+str(level)\n\n cursor.execute(sql)\n db_result = cursor.fetchall()\n #print(\"DB RESULT in RECORD CHECK: \", db_result)\n\n if db_result[0][0] < score:\n sql = 'UPDATE player_level_scores SET record_score='+str(score)+' WHERE player_id= '+str(player)+' AND level_id = '+str(level)\n cursor.execute(sql)\n conn.commit()\n print(\"DB RECORD UPDATED: \", str(score))\n #db_result = cursor.fetchall()\n print(\"DB RECORD NOT UPDATED: \", str(db_result[0][0]))\n conn.close()\n\n pass\n\n\n\n# to Avoid Database errors\n# try:\n# cursor.execute(sql_statement)\n# result = cursor.fetchall()\n# except sqlite3.DatabaseError as err:\n# print(\"Error: \", err)\n# else:\n# conn.commit()","repo_name":"voscovvo/MineSmart","sub_path":"database_functions.py","file_name":"database_functions.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13372900811","text":"import json\nimport glob\nfrom shutil import move, copy\nimport pickle\nimport random\n\n# get the right files\n# parse them into json with same filenames\nimport os\n\n\ndef parse_to_json(txt_path, out_json):\n with open(txt_path, encoding=\"utf-8\", errors='ignore') as f:\n lines = f.readlines()\n\n # get transcriptions from 2007 06 11 onwards\n lines = lines[6455:]\n\n result = {}\n\n l = 0\n\n while (l < len(lines) -2):\n print(l)\n\n line = lines[l]\n\n date = line[0:6]\n\n i = 1\n\n next_lines = []\n print(date)\n\n while lines[l + i][0:6] == date:\n next_lines.append(lines[l + i])\n i = i + 1\n\n year = line[0:2]\n month = line[2:4]\n day = line[4:6]\n\n next_lines.insert(0, line)\n\n full_t = \"\"\n for n_line in next_lines:\n p_l = n_line[10:]\n p_l = p_l.strip('\\n')\n\n full_t = full_t + p_l + \" \"\n\n # remove last space\n full_t = full_t[:-1]\n\n split_t = full_t.split(\" - \")\n\n for j, t in enumerate(split_t):\n k = \"20\" + year + \"-\" + month + \"-\" + day + \"_\" + str(j)\n\n result[k] = t\n\n l = l + i\n\n with open(out_json, \"w+\") as out_file:\n json.dump(result, out_file)\n\ndef json_to_txt_from_images(transcriptions_json, image_paths, out_folder, images_folder):\n\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n\n with open(transcriptions_json) as transcriptions_file:\n t_dict = json.load(transcriptions_file)\n\n missing = []\n\n for impath in image_paths:\n\n key = impath.replace(\".png\", \"\")\n key = key.replace(images_folder, \"\")\n\n # print(key)\n\n txt_path = impath.replace(\".png\", \".txt\")\n txt_path = txt_path.replace(images_folder, out_folder)\n\n if key in t_dict.keys():\n annotation = t_dict[key]\n\n file_object = open(txt_path, \"w+\")\n file_object.write(annotation)\n file_object.close()\n\n else:\n print(key)\n missing.append(key)\n\n res = {\"missing\": missing}\n print(res)\n\n with open(\"missing.json\", \"w+\") as missing_file:\n json.dump(res, missing_file)\n\ndef fix_miss_annotated_images(missing_json, images_folder):\n\n with open(missing_json) as missing_file:\n missing_dict = json.load(missing_file)\n\n missing_list = missing_dict[\"missing\"]\n\n for missing in missing_list:\n\n base_path = images_folder + missing[0:-1] + \"*\"\n\n images_list = sorted(glob.glob(base_path))\n\n for i, image_path in enumerate(images_list):\n # remove last digit\n\n # x.png\n new_image_path = image_path[0:-5] + str(i) + \".png\"\n\n move(image_path, new_image_path)\n\ndef remove_missing(missing_json, images_folder):\n\n if not os.path.exists(\"data/dilbert/dilbert_transcribed/missing/\"):\n os.makedirs(\"data/dilbert/dilbert_transcribed/missing/\")\n\n with open(missing_json) as missing_file:\n missing_dict = json.load(missing_file)\n\n missing_list = missing_dict[\"missing\"]\n\n for missing in missing_list:\n\n images_list = sorted(glob.glob(images_folder + missing + \".png\"))\n\n for i, image_path in enumerate(images_list):\n # remove last digit\n new_image_path = image_path.replace(images_folder, \"data/dilbert/dilbert_transcribed/missing/\")\n # x.png\n move(image_path, new_image_path)\n\n\ndef make_train_test_pickles(images_list, image_folder, out_folder):\n\n length = len(images_list)\n tr_length = round(length * .75)\n\n file_paths = [fn.replace(image_folder, \"001.dilbert_transcribed/\").replace(\".png\", \"\") for fn in images_list]\n\n # ['001.Black_footed_Albatross/Black_Footed_Albatross_0046_18',\n print(file_paths[0])\n\n class_i = [1]*length\n\n file_paths_train = 
file_paths[0:tr_length]\n file_paths_test = file_paths[tr_length:]\n\n classi = [1] * length\n classi_train = classi[0:tr_length]\n classi_test = classi[tr_length:]\n\n print(length)\n\n if not os.path.exists(out_folder + \"train/\"):\n os.makedirs(out_folder + \"train/\")\n\n if not os.path.exists(out_folder + \"test/\"):\n os.makedirs(out_folder + \"test/\")\n\n pickle.dump(file_paths_train, open(out_folder + \"train/filenames.pickle\", \"wb\"))\n pickle.dump(file_paths_test, open(out_folder + \"test/filenames.pickle\", \"wb\"))\n\n pickle.dump(classi_train, open(out_folder + \"train/class_info.pickle\", \"wb\"))\n pickle.dump(classi_test, open(out_folder + \"test/class_info.pickle\", \"wb\"))\n\n\ndef make_examples(transcriptions_list, transcriptions_folder, out_folder, out_folder_old):\n\n # gen 2048\n fn_object = open(out_folder.replace(\"gen_captions/\", \"\") + \"example_filenames\", \"w+\")\n file_names = transcriptions_list[0:2050]\n\n for fn in file_names:\n copy_to = fn.replace(transcriptions_folder, out_folder)\n copy(fn, copy_to)\n\n fn = fn.replace(out_folder_old, \"gen_captions/\").replace(\".txt\", \"\")\n\n fn_object.write(fn)\n fn_object.write(\"\\n\")\n fn_object.close()\n\n\ndef save_losses(g_losses, d_losses, epoch):\n import json\n\n with open(\"g_d_losses_{}.json\".format(epoch), \"w+\") as js_file:\n res = {\"g_losses\": g_losses, \"d_losses\": d_losses}\n json.dump(res, js_file)\n\n\ndef copy_images_transcriptions(image_folder, image_paths, transcriptions_folder, transcriptions_paths, copy_to_im, copy_to_txt):\n\n new_im_path = image_paths[0].replace(image_folder, copy_to_im)\n new_tr_path = transcriptions_paths[0].replace(transcriptions_folder, copy_to_txt)\n\n if not os.path.exists(new_im_path):\n os.makedirs(new_im_path)\n os.makedirs(new_tr_path)\n\n for im_path in image_paths:\n new_im_path = im_path.replace(image_folder, copy_to_im)\n move(im_path, new_im_path)\n\n for tr_path in transcriptions_paths:\n new_tr_path = tr_path.replace(transcriptions_folder, copy_to_txt)\n move(tr_path, new_tr_path)\n\n\nif __name__ == \"__main__\":\n transcriptions_path = \"../data/dilbert/transcriptions.txt\"\n out_json = \"data/dilbert/transcriptions.json\"\n\n # parse_to_json(transcriptions_path, out_json)\n image_folder = \"data/dilbert/dilbert_transcribed_10k/images/001.dilbert_transcribed/\"\n image_paths = sorted(glob.glob(\"data/dilbert/dilbert_transcribed_10k/images/001.dilbert_transcribed/*\"))\n out_folder = \"data/dilbert/dilbert_transcribed/\"\n\n train_paths_im = image_paths[0:3000]\n\n test_paths = image_paths[3000:5200]\n\n # json_to_txt_from_images(out_json, image_paths, out_folder, image_folder)\n\n # fix_miss_annotated_images(\"missing.json\", image_folder)\n\n # remove_missing(\"missing.json\", image_folder)\n\n\n transcriptions_folder = \"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/\"\n transcriptions_list = sorted(glob.glob(\"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/*\"))\n\n train_paths_txt = transcriptions_list[0:3000]\n\n gen_trans = transcriptions_list[-2100:]\n\n print(len(image_paths))\n\n out_folder_txt = out_folder + \"text/001.dilbert_transcribed/\"\n out_folder_im = out_folder + \"images/001.dilbert_transcribed/\"\n\n image_folder_3k = \"data/dilbert/dilbert_transcribed/images/001.dilbert_transcribed/\"\n image_paths_3k = sorted(glob.glob(\"data/dilbert/dilbert_transcribed/images/001.dilbert_transcribed/*\"))\n\n # make_train_test_pickles(image_paths_3k, image_folder_3k, out_folder)\n\n 
out_folder_gen_captions = out_folder + \"gen_captions/\"\n out_folder_old = \"data/dilbert/dilbert_transcribed_10k/text/001.dilbert_transcribed/\"\n\n print(len(gen_trans))\n\n # copy_images_transcriptions(image_folder, train_paths_im, transcriptions_folder, train_paths_txt, out_folder_im, out_folder_txt)\n\n # transcriptions_folder = \"data/dilbert/dilbert_transcribed/text/001.dilbert_transcribed/\"\n # make_examples(gen_trans, transcriptions_folder, out_folder_gen_captions, out_folder_old)\n\n","repo_name":"bprovanbessell/research-code","sub_path":"data-prep-and-analysis/transcription_parser.py","file_name":"transcription_parser.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"203728739","text":"import asyncio\nimport time\nurls = [\"www.baidu.com\", \"www.sogou.com\", \"www.goubanjia.com\"]\n\n# Cannot use sync models in async\nasync def makeRequest(url):\n print(\"Requesting: \", url)\n await asyncio.sleep(2)\n print(\"Succeded\")\n return url\n\n# Get the instance returned from an async function\n# c = makeRequest(\"www.baidu.com\")\n\n# Establish event loop\n# loop = asyncio.get_event_loop()\n\n# register the instance into loop\n# loop.run_until_complete(c)\n\n# # using task, depend on loop\n# loop = asyncio.get_event_loop()\n# # establish a task instance\n# task = loop.create_task(c)\n# print(task)\n\n# loop.run_until_complete(task)\n\n# print(task)\n\n# using future, does not depend on loop\n# loop = asyncio.get_event_loop()\n# task = asyncio.ensure_future(c)\n# print(task)\n# loop.run_until_complete(task)\n# print(task)\n\ndef callbackFunc(task):\n print(task.result())\n\n# Initate multiple task instances\ntasks = []\nfor u in urls:\n c = makeRequest(u)\n tasks.append(asyncio.ensure_future(c))\n\n\nloop = asyncio.get_event_loop()\n# task = asyncio.ensure_future(c)\n# callback\n# task.add_done_callback(callbackFunc)\nloop.run_until_complete(asyncio.wait(tasks))\n","repo_name":"YufeiLinUlysses/LearnDataScience","sub_path":"Data Collection/Web_Crawler/asyncCrawler2.py","file_name":"asyncCrawler2.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1075856269","text":"from selenium.common.exceptions import InvalidArgumentException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass BasePage():\n def __init__(self, browser, url):\n self.browser = browser\n self.url = url\n\n def open(self):\n try:\n self.browser.get(self.url)\n except InvalidArgumentException:\n raise AssertionError('Не удалось открыть сайт')\n\n def is_present(self, locator, timer=10):\n try:\n wait = EC.presence_of_element_located(locator)\n element = WebDriverWait(self.browser, timer).until(wait)\n return element\n except TimeoutException:\n raise AssertionError('Элемент не найден')\n\n def is_clickable(self, locator, timer=10):\n try:\n wait = EC.element_to_be_clickable(locator)\n element = WebDriverWait(self.browser, timer).until(wait)\n return element\n except TimeoutException:\n raise AssertionError('Элемент не активен или отсутствует')\n\n def click_gently(self, locator, timer=10):\n self.is_clickable(locator).click()\n\n def check_url(self, url):\n return url.lower() == self.browser.current_url\n","repo_name":"Hellsingoff/SeleniumTest","sub_path":"features/steps/pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18284889294","text":"class Dieta:\n\n def _init_(self, id, Restriction, Restriccion, USD) -> None:\n self.id = id\n self.Restriction = Restriction\n self.Restriccion = Restriccion\n self.USD = USD\n \n\n\n def serialize(self):\n return {\n 'id': self.id,\n 'Restriction': self.Restriction,\n 'Restriccion': self.Restriccion,\n 'USD': self.USD\n }\n","repo_name":"mpereyrai/manuel_pereyra_fdi_final_06_07_2023","sub_path":"clase_dieta.py","file_name":"clase_dieta.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9331802514","text":"# Behavioural cloning conditioned on state-conditioned language\n\n\n\n\nfrom argparse import ArgumentParser\nimport pickle\nimport time\nimport os\nimport gym\nimport minerl\nimport torch as th\nimport numpy as np\nfrom lib.data_parallel import BalancedDataParallel\nfrom matplotlib.pyplot import figure\nfrom matplotlib import pyplot as plt\nfigure(figsize=(20, 20), dpi=80)\n\nfrom agent import PI_HEAD_KWARGS, MineRLAgent\nfrom IDM_data_loader_np256 import DataLoader\nfrom lib.tree_util import tree_map\nfrom cosine_annealing_warmup import CosineAnnealingWarmupRestarts\nimport numpy as np \n\n\n\n\n\n\n\n\n\n# NOTE: modify with the desired language model and gated-cross-attention weights\nLM_WEIGHTS_FILE = 'TRAINING/LM_ONLY/_FINAL/VLPT_LM_500__LM.weights'\nVPT_LM_XATTN_WEIGHTS_FILE = 'TRAINING/LM_ONLY/_FINAL/VLPT_LM_500__Xattn_VPT_LM.weights'\n\n\n\n\n\n\n\n\n\n\n# ------------------ MODEL HYPERPARAMETERS\nLM_TIMEOUT_RATE = 1 # results in about 7% silence tokens at NeCubS WPM -- wrong calcualteions, is 20% silence\nF_SEQ_LEN = 96\nL_SEQ_LEN = F_SEQ_LEN//LM_TIMEOUT_RATE\nLM_type = \"transfo-xl-wt103\"\nXATNN_MEMLEN = 128\n\nVPT_MODEL_FILE = 'foundation-model-1x.model' #'VLPT/2x.model'\nVPT_WEIGHTS_FILE = 'foundation-model-1x.weights' # 'VLPT/bc-early-game-2x.weights' # 'VLPT/rl-from-early-game-2x.weights' \nVPT_WIDTH = 1024\nDTYPE = th.bfloat16\n\nTRAINING_LOG_FILE = 'TRAINING/VPT_ONLY/training_log'\nOUTPUT_WEIGHTS = 'TRAINING/VPT_ONLY/VLPT.weights'\n# VPT model automatically downloads transfo_xl weights from HuggingFace and uses those for LM. If weights include the LM it should be overwritten though?\n\n\n\n\n\n# -------------------- TRAINING HYPERPARAMETERS\nVPT_LEARNING_RATE = 0.00002 # VPT paper did 0.000181 for finetuning: [we are training to a very different task], [VPT uses linear learning rate decay], [] # to keep the LM intact I dont \nwarmup_steps = 400 # warmup should be very short since the transformers are pretrained # PaLI uses 1k warmup steps, obviously dont want to do more\nBATCH_SIZE = 16\nEPOCHS = 5\nN_WORKERS = 31 # Needs to be <= number of videos # Ideally more than batch size to create variation in datasets (otherwise, you will get a bunch of consecutive samples)\n\nVPT_WEIGHT_DECAY = 0.039428 # VPT weigh decay. transfoxl weight decay is \nVPT_MAX_GRAD_NORM = 1.0 # VPT says 5.0, transfoXL says 0.25. We will basically c\n\nEVAL_BATCH_SIZE=4\nnum_videos = 172\n\nDATASET = 'DATASET/'\n\nTRAINING_PROGRESS = 'TRAINING/VPT_ONLY/training_progress'\nmax_train_steps = (EPOCHS*num_videos*20*60*30)/(F_SEQ_LEN*BATCH_SIZE) # num steps = number of frames / number of frames per batch# 3*10 mins per video = 600000 ms -> 4687 chunks of 128 frames. want 1000 hours video = 60,000 minutes = 6,000 videos of 10 minutes each\nLOSS_REPORT_RATE = 10\nEVALUATION_RATE = 100\n#higher tha its peak laerning rate. 
finetuning a multimdodal LM with the same peak lr seems ok according to PaLI,Flamingo but they also train on other tasks, maybe just keep some minecraft data for langauge training?\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# ------------------------------------- USEFUL UTILITIES\ndef load_model_parameters(path_to_model_file):\n agent_parameters = pickle.load(open(path_to_model_file, \"rb\"))\n policy_kwargs = agent_parameters[\"model\"][\"args\"][\"net\"][\"args\"]\n pi_head_kwargs = agent_parameters[\"model\"][\"args\"][\"pi_head_opts\"]\n pi_head_kwargs[\"temperature\"] = float(pi_head_kwargs[\"temperature\"])\n return policy_kwargs, pi_head_kwargs\n\ndef save_hidden_states_VPT(video_ids, hidden_state, saved_hidden_states):\n # Unpack the hidden states\n # Iterate over the batch dimension\n for b in range(BATCH_SIZE):\n video_id = video_ids[b]\n video_hidden_state = []\n for l in range(4):\n (hidden_state_1, (hidden_state_2a, hidden_state_2b)) = hidden_state[l]\n #hidden_state_1[:]=False\n #video_id=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n \n # Get the hidden state for this video\n video_hidden_state_layer = (hidden_state_1[b].clone(), (hidden_state_2a[b].clone(), hidden_state_2b[b].clone()))\n video_hidden_state.append(video_hidden_state_layer)\n # Save the hidden state for this video\n saved_hidden_states[video_id] = video_hidden_state\n \n return saved_hidden_states\n\ndef load_hidden_states_VPT(video_ids, saved_hidden_states):\n assert isinstance(video_ids, list)\n assert(len(video_ids)==BATCH_SIZE)\n B = BATCH_SIZE\n T = 128\n E = VPT_WIDTH\n \n # Initialize the hidden states\n hidden_state_1 = [th.zeros([B, 1, T], dtype=th.bool).to(DEVICE)]*4\n hidden_state_2a = [th.zeros([B, T, E], dtype=DTYPE).to(DEVICE)]*4\n hidden_state_2b = [th.zeros([B, T, E], dtype=DTYPE).to(DEVICE)]*4\n \n # Iterate over the batch dimension\n for b in range(B):\n video_id = video_ids[b]\n #video_id=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n \n # Check if a hidden state has been saved for this video\n if video_id in saved_hidden_states:\n\n for l in range(4): # repeat for each layer in VPT\n # Get the saved hidden state for this video\n (video_hidden_state_1, (video_hidden_state_2a, video_hidden_state_2b)) = saved_hidden_states[video_id][l]\n \n # Set the hidden state for this video\n hidden_state_1[l][b] = video_hidden_state_1\n hidden_state_2a[l][b] = video_hidden_state_2a\n hidden_state_2b[l][b] = video_hidden_state_2b\n else:\n print(\"VPT NEW VIDEO SEEN: ADD FRESH INIT. 
STATE\", video_id)\n\n for l in range(4): # repeat for each layer in VPT\n # Get a new initial hidden state for this video\n \n \n _, (video_hidden_state_2a, video_hidden_state_2b) = policy.initial_state(1)[l]\n \n # Set the initial hidden state for this video\n #print(video_hidden_state_1.shape)\n is_first_frame_true = th.zeros((1, 128), dtype=th.bool).to(DEVICE)\n is_first_frame_true[:,0]=True\n hidden_state_1[l][b] = is_first_frame_true\n hidden_state_2a[l][b] = video_hidden_state_2a\n hidden_state_2b[l][b] = video_hidden_state_2b\n\n hidden_state = []\n for i in range(4):\n hidden_state_layer = hidden_state_1[l], (hidden_state_2a[l], hidden_state_2b[l])\n hidden_state.append(hidden_state_layer)\n\n return hidden_state\n\ndef load_hidden_states_LM(video_ids, saved_hidden_states):\n assert isinstance(video_ids, list)\n try:\n T = policy.net.LM.transformer.mem_len\n n_layers = policy.net.LM.transformer.n_layer\n except: \n return None\n B = BATCH_SIZE\n E = 1024\n\n out_hidden_state = []\n for i in range(n_layers):\n out_hidden_state.append(th.zeros([T,B,E], dtype=DTYPE).to(DEVICE))\n\n for b, video in enumerate(video_ids):\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n\n if video in saved_hidden_states:\n hidden_state = saved_hidden_states[video]\n else:\n hidden_state = policy.net.LM.transformer.init_mems(1)\n #print(\"LM NEW VIDEO SEEN: ADD FRESH INIT. STATE\", video)\n\n for l in range(n_layers):\n #print('\\n',out_hidden_state[l].shape)\n #print(hidden_state[l].shape)\n out_hidden_state[l][:T,b,:E] = hidden_state[l].clone().squeeze(1)\n\n\n return out_hidden_state\n\ndef save_hidden_states_LM(video_ids, hidden_state, saved_hidden_states):\n try:\n T = policy.net.LM.transformer.mem_len\n n_layers = policy.net.LM.transformer.n_layer\n except:\n return None\n B = BATCH_SIZE\n E = 1024\n\n for b, video in enumerate(video_ids): #frames:\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n out_hidden_state = []\n for layer in hidden_state: # rewrite with the new one\n layer_sample = layer[:T,b,:E].clone().unsqueeze(1)\n out_hidden_state.append(layer_sample) # MAKE SURE WE CLONE - we dont want to mutate states that are are in use \n\n saved_hidden_states[video] = out_hidden_state\n\ndef load_hidden_states_Xattn(video_ids, saved_hidden_states, SEQ_LEN, E):\n #assert hidden_state.shape == [BATCH_SIZE,F_SEQ_LEN or L_SEQ_LEN, 2048 or 1024]\n \n XATTN_MEMLEN=XATNN_MEMLEN\n T = XATTN_MEMLEN + SEQ_LEN\n B = BATCH_SIZE\n\n out_hidden_state = th.zeros([B,T,E], dtype=DTYPE).to(DEVICE) # keys may have different lengths, so we pad with -10 and mask them in VLPT forward\n\n for b, video in enumerate(video_ids):\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. 
this may increase val performance anyway by increasing the number of times mems is reset\n\n if video in saved_hidden_states:\n hidden_state = saved_hidden_states[video]\n else:\n hidden_state = th.zeros([1,T,E], dtype=DTYPE).to(DEVICE) # no past keys\n \n #print(\"LM NEW VIDEO SEEN: ADD FRESH INIT. STATE\", video)\n\n out_hidden_state[b] = hidden_state.clone().squeeze(0)\n\n return out_hidden_state\n\ndef save_hidden_states_Xattn(video_ids, hidden_state, saved_hidden_states):\n #assert hidden_state.shape == [BATCH_SIZE,F_SEQ_LEN or L_SEQ_LEN, 2048 or 1024]\n \n for b, video in enumerate(video_ids): #frames:\n #video=video.split(',')[0] IF WE DO THIS, HIDDEN STATE IS PRESERVED EVEN WHEN SWITCHING BETWEEN DIFFERNT TRAJECTORY FILES FROM THE SAME VIDEO. THIS DEPENDS ON THE SECTIONS BEING LOADED IN ORDER, WHIH SI NOT DONE HERE. this may increase val performance anyway by increasing the number of times mems is reset\n out_hidden_state = hidden_state[b].clone().unsqueeze(0)\n\n saved_hidden_states[video] = out_hidden_state\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef VPT_train():\n \n global eval_data_loader, policy, agent, vanvpt_agent, DEVICE, s1, s2 # for eval function\n if th.cuda.is_available():\n DEVICE = \"cuda\"\n else:\n DEVICE = \"cpu\"\n\n #try:\n s1 = th.cuda.Stream()\n s2 = th.cuda.Stream() # for multithreading. When we need to calculate forward pass for training model and original VPT model for KL-divergence, we can do both concurrently.\n #except:\n # pass\n\n\n ### ---------------------------- initialise dataset and training\n print('BC: starting data loaders')\n ## Data Loader init\n train_data_loader = DataLoader(\n dataset_dir=DATASET+'train/',\n n_workers=N_WORKERS,\n batch_size=BATCH_SIZE,\n F_SEQ_LEN=L_SEQ_LEN*LM_TIMEOUT_RATE,\n LM_TIMEOUT_RATE=LM_TIMEOUT_RATE,\n LM_SILENCE_TOKEN=2,\n n_epochs=EPOCHS,\n start_time='rand')\n\n\n\n ### ---------------------- initialise BLC agent\n print('BC: LOADING VLPT')\n ### VPT INIT\n agent_policy_kwargs, agent_pi_head_kwargs = load_model_parameters(VPT_MODEL_FILE)\n # To create model with the right environment.\n # All basalt environments have the same settings, so any of them works here\n agent = MineRLAgent(device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs, LM_type=LM_type, LM_TIMEOUT_RATE=LM_TIMEOUT_RATE, L_SEQ_LEN=L_SEQ_LEN, dtype=DTYPE)\n #agent = MineRLAgent(device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs, LM_type=None, LM_TIMEOUT_RATE=LM_TIMEOUT_RATE, L_SEQ_LEN=L_SEQ_LEN, dtype=DTYPE)\n \n agent.load_weights(VPT_WEIGHTS_FILE) #@ DEBUG EVALUATE\n agent.load_weights(VPT_LM_XATTN_WEIGHTS_FILE) # load Xattn from finetuned LM-only frame-conditioned training\n agent.load_weights(LM_WEIGHTS_FILE)# load LM from finetuned LM-only frame-conditioned training\n #agent.load_weights('TRAINING/VPT_ONLY/final_7/VLPT_2500_.weights')\n\n policy = agent.policy # = th.compile(agent.policy)\n\n ## enable dropout for apporopriate layers\n policy.net.eval() \n policy.net.VPT0_VPT1_dropout.train() # We are only training Xattn2, VPT2-VPT4 and the final layers. so these layers should have dropout applied. These get input from vpt1 and LM. this means we need dropout to apply dropout between VPT1 and VPT 2 (im mixing indices starting at 0 and 1, you know what I mean, neither is very clear :P).\n policy.net.recurrent_layer.blocks[0].train() # for proper dropout ebtween VPT1 and VPT2 we should do this, as this applies the residual after dropout. 
HOWEVER, this also applies dropout to the LM input, which is an unnecessary training slowdown since the LM is not being trained and dropout is already applied to the LM input of the layers that are. We could reorganise VPT1 to output the residual and the final FFW output separately, apply dropout manually and reconnect the residual where needed, but that is complicated and probably unnecessary: applying dropout to the residual made no noticeable difference in the LM-only training runs.\n    for i in [1,2,3]:\n        policy.net.recurrent_layer.blocks[i].train() # add dropout to all VPT layers being trained (2-4)\n    policy.net.lastlayer.train()\n    policy.net.final_ln.train() # these last two don't even have dropout, but i believe in magic\n    policy.net.Xattn_LM_VPT.train()\n    policy.net.LM.transformer.layers[-1].pos_ff.CoreNet[-1].train() # activate the final dropout layer in the LM so that the residual is applied. This applies dropout to the LM input of Xattn\n\n    # -- freeze untrained layers\n    # we don't train VPT1: the LM was trained on its outputs, so training it further would likely improve BC performance at the cost of LM performance.\n    VPT_trainable_params = set()\n    VPT_trainable_params.update(set(policy.net.recurrent_layer.blocks[1:4].parameters()))\n    VPT_trainable_params.update(set(policy.net.lastlayer.parameters()))\n    VPT_trainable_params.update(set(policy.net.final_ln.parameters()))\n    VPT_trainable_params.update(set(policy.net.Xattn_LM_VPT.parameters()))\n    VPT_trainable_params = list(VPT_trainable_params)\n\n    ## freeze layers not being trained - don't update their params, remove grads. We could let gradients flow so VPT1 is learnt too, but the LM is pretrained to depend on VPT1's outputs and changing them might hurt language performance, so we leave VPT1 frozen and gradient-free.\n    frozen_params = set(policy.net.parameters()) - set(VPT_trainable_params) # removes LM, first transformer layer\n    for param in list(frozen_params):\n        param.requires_grad = False\n\n    ### LOAD VANILLA_VPT FOR KL DIVERGENCE CHECKS\n    print('BLC: LOADING VAN_VPT')\n    vanvpt_agent_policy_kwargs, vanvpt_agent_pi_head_kwargs = load_model_parameters(VPT_MODEL_FILE)\n    # To create a model with the right environment.\n    # All basalt environments have the same settings, so any of them works here\n    vanvpt_agent = MineRLAgent(device=DEVICE, policy_kwargs=vanvpt_agent_policy_kwargs, pi_head_kwargs=vanvpt_agent_pi_head_kwargs, LM_type=None, dtype=DTYPE)\n    vanvpt_agent.load_weights(VPT_WEIGHTS_FILE)\n\n    #vanvpt_agent.policy.net = th.compile(vanvpt_agent.policy.net)\n    vanvpt_agent.policy.eval()\n\n    # --- DEFINE OPTIMIZER # don't optimize the CNN section.\n    print('BC: OPTIMISER')\n\n    optimizer = th.optim.AdamW(params=VPT_trainable_params, lr=VPT_LEARNING_RATE, weight_decay=VPT_WEIGHT_DECAY)\n    lr_schedule = CosineAnnealingWarmupRestarts(optimizer,\n        first_cycle_steps=max_train_steps,\n        cycle_mult=1.0,\n        max_lr=VPT_LEARNING_RATE, #@ WARNING: this sets the VPT and LM learning rates to the same value. 
For now this is okay because they are the same anyway, but it will need modifying if different learning rates are used in the end\n        min_lr=0,\n        warmup_steps=warmup_steps,\n        gamma=1.0)\n\n    # --------------------------- start training loop\n    print('BC: MAIN LOOP:')\n    saved_hidden_states_VPT = {} # despite workers > batch size (so videos are not streamed in perfect order across batches), these caches let us keep track of the VPT hidden state and LM hidden state per video\n    saved_hidden_states_LM = {} # same^\n    saved_hidden_states_Xattn1 = {}\n    saved_hidden_states_Xattn2 = {}\n    saved_hidden_states_vanVPT = {} # same^\n\n    lowest_val_loss = [float('inf')]*4\n    is_first_frame = th.zeros((BATCH_SIZE, F_SEQ_LEN), dtype=th.bool).to(DEVICE)\n    current_video_group_id = [0]*BATCH_SIZE\n    start_time = time.time()\n    loss_sum=np.zeros([0,2])\n    val_loss_sum=np.zeros([0,2]) # only have BC loss and KL-divergence to track\n    gates=np.zeros([0,2])\n    # stream multiple ~10-minute videos across multiple batches, so the language model sees far-back language\n    for batch_i, (video_group_id, subseq_ids, batch_frames, batch_words, batch_actions, finished_videos) in enumerate(train_data_loader):\n\n        # -------------------------------------------- EVALUATION ------------------------------------------\n        if batch_i%EVALUATION_RATE == 0:\n            print(\"## ---------------------------------- - EVAL - ---------------------------------------\", batch_i)\n            eval_data_loader = DataLoader(\n                dataset_dir=DATASET+'valid/',\n                n_workers=EVAL_BATCH_SIZE,\n                batch_size=EVAL_BATCH_SIZE,\n                F_SEQ_LEN=F_SEQ_LEN,\n                n_epochs=1,\n                LM_TIMEOUT_RATE=LM_TIMEOUT_RATE,\n                max_subseqs_per_traj=30,\n                start_time=120)\n            VPT_eval_loss, noised_VPT_eval_loss = VPT_evaluate()\n            del eval_data_loader\n\n            val_loss = np.asarray([[VPT_eval_loss, noised_VPT_eval_loss]])\n            val_loss_sum = np.concatenate([val_loss_sum, val_loss])\n\n            # --- plot val_loss\n            plt.plot(val_loss_sum[:,0], color='blue')\n            try:\n                os.remove('TRAINING/VPT_ONLY/val_loss_graph_.png')\n            except:\n                pass\n            plt.savefig('TRAINING/VPT_ONLY/val_loss_graph_.png')\n            plt.clf()\n\n            # --- plot performance difference between matched words/frames and unmatched\n            plt.plot(val_loss_sum[:,1]-val_loss_sum[:,0], color='black') # if the NN is learning to use words properly, then noised_loss > val_loss, so the graph goes up\n            try:\n                os.remove('TRAINING/VPT_ONLY/noised_diff_graph_.png')\n            except:\n                pass\n            plt.savefig('TRAINING/VPT_ONLY/noised_diff_graph_.png')\n            plt.clf()\n\n            line=str(\"Eval: VPT_loss: {0}, noised_VPT_loss: {1}\\nXattnGate:1{2}\".format(\n                str(VPT_eval_loss),\n                str(noised_VPT_eval_loss),\n                str(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item())+','+str(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())))\n            with open('TRAINING/VPT_ONLY/training_log_val','a') as file:\n                file.write(line)\n\n            # save a model only if ALL losses are lower\n            save_model=True\n            for loss_lowest, loss_new in zip(lowest_val_loss, [VPT_eval_loss, noised_VPT_eval_loss]):\n                if loss_lowest < loss_new:\n                    save_model = False\n            if save_model:\n                print(\"#----------------------- BEST VAL LOSS! 
SAVING!\")\n # SAVE MODEL WEIGHTS\n lowest_val_loss = [VPT_eval_loss, noised_VPT_eval_loss]\n output_path = '/'.join(OUTPUT_WEIGHTS.split('/')[0:-1])+'/'\n output_bonus = '_'+str(batch_i)+'_'\n output_name = '.'.join(OUTPUT_WEIGHTS.split('/')[-1].split('.')[0:-1])+output_bonus\n th.save(policy.state_dict(), output_path+output_name+'.weights')\n \n # ALSO SAVE OPTIMIZER AND LEARNING_RATE_SCHEDULER STATES\n th.save(optimizer.state_dict(), output_path+output_bonus+'.optim')\n th.save(lr_schedule.state_dict(), output_path+output_bonus+'.lrschedule')\n print('## ---------------------------------- - TRAIN - ---------------------------------------\"')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # ---------------------------------------------------------- TRAINING BATCH --------------------------------------------\n \n\n\n\n ### ------------ FORMAT INPUT \n # format words\n x_words, y_words = batch_words['input_ids'], batch_words['labels']\n x_words=th.from_numpy(x_words).to(DEVICE)\n y_words=th.from_numpy(y_words).to(DEVICE)\n # format input frames\n batch_frames['img'] = th.from_numpy(batch_frames['img']).to(DTYPE).to(DEVICE) #\n\n # format action labels\n batch_actions['camera'] = batch_actions['camera'].reshape([-1,2])\n batch_actions['buttons'] = batch_actions['buttons'].reshape([-1,20])\n\n action_labels = agent._IDM_action_to_env(batch_actions)\n #print('\\n\\n',action_labels)\n action_labels = agent._env_action_to_agent(action_labels, to_torch=True, check_if_null=False)\n\n action_labels['camera'] = action_labels['camera'].reshape([BATCH_SIZE,F_SEQ_LEN])\n action_labels['buttons'] = action_labels['buttons'].reshape([BATCH_SIZE,F_SEQ_LEN])\n #1/0\n\n\n\n\n\n ## ---------- LOAD MEMS FOR VIDEOS\n VPT_state = load_hidden_states_VPT(video_group_id, saved_hidden_states_VPT)\n vanVPT_state = load_hidden_states_VPT(video_group_id, saved_hidden_states_vanVPT)\n LM_state = load_hidden_states_LM(video_group_id, saved_hidden_states_LM)\n Xattn1_state = load_hidden_states_Xattn(video_group_id, saved_hidden_states_Xattn1, SEQ_LEN=F_SEQ_LEN, E=VPT_WIDTH)\n Xattn2_state = load_hidden_states_Xattn(video_group_id, saved_hidden_states_Xattn2, SEQ_LEN=L_SEQ_LEN, E=1024)\n\n\n th.cuda.synchronize()\n ## ----------------- VLPT MODEL FORWARD PASS\n #with th.cuda.stream(s1):\n # PREDICT VLPT (input frames and paired language tokens). 
Get output VPT actions, and LM loss\n VLPT_pd_action, _, _, VPT_state, LM_state, _, Xattn1_state, Xattn2_state = policy.get_output_for_observations( # we still need LM state for proper LM inference\n ob_words=x_words,\n ob_frames=batch_frames,\n VPT_state=VPT_state,\n first=is_first_frame,\n LM_state=LM_state,\n LM_labels=None,\n Xattn1_state=Xattn1_state,\n Xattn2_state=Xattn2_state) #not training LM, dont need labels\n\n #with th.cuda.stream(s2):\n # # ----- get action KL-divergence to original VPT\n # with th.no_grad():\n # vanVPT_pd_action, _, _, vanVPT_state, _, _, _, _ = vanvpt_agent.policy.get_output_for_observations(\n # ob_words=None,\n # ob_frames=batch_frames, # give same input\n # VPT_state=vanVPT_state, # use separate mems since VLPT mems are polluted from vanillavpt by LM signal, so cannot be used by original VPT\n # first=is_first_frame)\n\n th.cuda.synchronize() \n\n\n\n # ------------------------- BACKWARD PASS \n # calculate loss\n KL_divergence = 0 #agent.policy.get_kl_of_action_dists(VLPT_pd_action, vanVPT_pd_action).mean() \n VLPT_pd_action['buttons'] = VLPT_pd_action['buttons'].view([BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n VLPT_pd_action['camera'] = VLPT_pd_action['camera'].view([BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n VPT_loss = -policy.get_logprob_of_action(VLPT_pd_action, action_labels) # th.zeros([1]) \n #since VPT loss starts higher than LM, we scale it to offset the difference so optimizer doesnt go hard against LM objective in beginning\n _loss = VPT_loss.mean() + KL_divergence\n _loss.backward()\n th.nn.utils.clip_grad_norm_(VPT_trainable_params, VPT_MAX_GRAD_NORM)\n optimizer.step()\n lr_schedule.step()\n policy.zero_grad(set_to_none=True)\n\n # Make sure we do not try to backprop through sequence in future iterations\n vanVPT_state = tree_map(lambda x: x.detach(), vanVPT_state)\n VPT_state = tree_map(lambda x: x.detach(), VPT_state)\n LM_state = tree_map(lambda x: x.detach(), LM_state)\n Xattn1_state = Xattn1_state.detach()\n Xattn2_state = Xattn2_state.detach()\n\n # save hidden states from these videos for next time they show up. print('save_hid..')\n save_hidden_states_VPT(video_group_id, VPT_state, saved_hidden_states_VPT)\n save_hidden_states_VPT(video_group_id, vanVPT_state, saved_hidden_states_vanVPT)\n save_hidden_states_LM(video_group_id, LM_state, saved_hidden_states_LM)\n save_hidden_states_Xattn(video_group_id, Xattn1_state, saved_hidden_states_Xattn1)\n save_hidden_states_Xattn(video_group_id, Xattn2_state, saved_hidden_states_Xattn2)\n \n th.cuda.empty_cache()\n\n\n # --- free up hidden states whose videos have ended (i.e. 
fix memory leak in original VPT github)\n        for video in finished_videos:\n            if video in saved_hidden_states_VPT:\n                print(\"video ended:\",video,\" cleaning up hidden state...\")\n                saved_hidden_states_VPT.pop(video)\n            saved_hidden_states_LM.pop(video, None)\n            saved_hidden_states_Xattn1.pop(video, None)\n            saved_hidden_states_Xattn2.pop(video, None)\n            saved_hidden_states_vanVPT.pop(video, None)\n\n        print('BC: TRAIN BATCH DONE!', video_group_id, subseq_ids, batch_i, finished_videos, VPT_loss.mean().item())\n\n        # ----- LOSS REPORTING\n        os.chdir('/content/drive/MyDrive/_DISSERTATION/')\n        loss = np.asarray([[VPT_loss.mean().item(), KL_divergence]])\n        loss_sum = np.concatenate([loss_sum, loss],axis=0)\n        gates_now = np.asarray([[ abs(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item()),\n                                  abs(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())\n                               ]])\n        gates=np.concatenate([gates,gates_now],axis=0)\n\n        if batch_i%LOSS_REPORT_RATE==0:\n            print('logging progress...')\n            time_since_start = time.time() - start_time\n\n            #plot loss\n            plt.plot(loss_sum[:,0], color='blue')\n            #plt.plot(loss_sum[:,1], color='black')\n            try:\n                os.remove('TRAINING/VPT_ONLY/loss_graph_.png')\n            except:\n                pass\n            plt.savefig('TRAINING/VPT_ONLY/loss_graph_.png')\n            plt.clf()\n            plt.close()\n\n            #plot gates\n            plt.plot(gates[:,0], color='darkblue')\n            plt.plot(gates[:,1], color='blue')\n            try:\n                os.remove('TRAINING/VPT_ONLY/gates_graph_.png')\n            except:\n                pass\n            plt.savefig('TRAINING/VPT_ONLY/gates_graph_.png')\n            plt.clf()\n            plt.close()\n\n            # record training progress so that if training crashes part-way through we can resume from the same spot (this implementation actually drops the rest of the current video and restarts at the next one.)\n            with open(TRAINING_PROGRESS, 'a') as progress_file:\n                line=str(batch_i)+str(video_group_id)+str(subseq_ids)\n                progress_file.write(line)\n            line=str(\"Eval: Time:{0}, VPT_loss: {1}, \\nXattnGate:1{2}\".format(\n                str(time_since_start),\n                str(VPT_loss),\n                str(agent.policy.net.Xattn_LM_VPT.alpha_xattn.item())+','+str(agent.policy.net.Xattn_LM_VPT.alpha_dense.item())\n                ))\n            with open('TRAINING/VPT_ONLY/training_log','a') as file:\n                file.write(line+'\\n')\n\n        # reset losses\n        VPT_loss,KL_divergence=0,0\n\n\ndef VPT_evaluate():\n\n    with th.no_grad():\n        global agent, eval_data_loader, policy, s2, vanvpt_agent\n\n        # put VLPT into testing mode\n        policy.eval()\n\n        # we don't want to disrupt the internal states of training during eval, so we use fresh ones\n        eval_current_video_group_id = 0\n        eval_VPT_loss =0\n        noise_eval_VPT_loss =0\n\n        eval_is_first_frame = th.zeros((EVAL_BATCH_SIZE, F_SEQ_LEN), dtype=th.bool).to(DEVICE)\n        num_batch=0\n        for batch_i, (eval_video_group_id, eval_subseq_ids, eval_batch_frames, eval_batch_words, eval_batch_actions, _) in enumerate(eval_data_loader):\n            num_batch+=1\n            if eval_video_group_id != eval_current_video_group_id:\n                eval_current_video_group_id = eval_video_group_id\n\n                eval_VPT_state = policy.initial_state(EVAL_BATCH_SIZE)\n                eval_LM_state = None\n                noise_eval_VPT_state = policy.initial_state(EVAL_BATCH_SIZE)\n                noise_eval_LM_state=None\n\n                eval_Xattn1_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+F_SEQ_LEN,VPT_WIDTH], dtype=DTYPE).to(DEVICE)\n                eval_Xattn2_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+L_SEQ_LEN,1024], dtype=DTYPE).to(DEVICE)\n\n                noise_eval_Xattn1_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+F_SEQ_LEN,VPT_WIDTH], dtype=DTYPE).to(DEVICE)\n                noise_eval_Xattn2_hidden_state = th.zeros([EVAL_BATCH_SIZE,XATNN_MEMLEN+L_SEQ_LEN,1024], dtype=DTYPE).to(DEVICE)\n\n            ### ------------- format input from data loader to agent\n            # format input 
frames\n            eval_batch_frames['img'] = th.from_numpy(eval_batch_frames['img']).to(DTYPE).to(DEVICE)\n            # format input/label words\n            x_words, y_words = eval_batch_words['input_ids'], eval_batch_words['labels']\n            x_words=th.from_numpy(x_words).to(DEVICE)\n\n            noised_x_words = x_words.clone()\n            noised_x_words = th.roll(noised_x_words, 1, 0)\n\n            eval_batch_actions['camera'] = eval_batch_actions['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN,2])\n            eval_batch_actions['buttons'] = eval_batch_actions['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN,20])\n            eval_action_labels = agent._IDM_action_to_env(eval_batch_actions)\n            eval_action_labels = agent._env_action_to_agent(eval_action_labels, to_torch=True, check_if_null=True)\n\n            ### ----------- FORWARD a VLPT batch with the words swapped around relative to the frames (in terms of sample index within the batch). This tells us how much the predicted actions for an episode are conditioned on the words for that episode\n            th.cuda.empty_cache()\n            th.cuda.synchronize()\n            with th.cuda.stream(s2):\n                noise_eval_VLPT_pd_action, _, _, noise_eval_VPT_state, noise_eval_LM_state, _, noise_eval_Xattn1_hidden_state, noise_eval_Xattn2_hidden_state = policy.get_output_for_observations(\n                    ob_words=noised_x_words,\n                    ob_frames=eval_batch_frames,\n                    VPT_state=noise_eval_VPT_state,\n                    LM_state=noise_eval_LM_state,\n                    LM_labels=None,\n                    first=eval_is_first_frame.clone(),\n                    Xattn1_state=noise_eval_Xattn1_hidden_state,\n                    Xattn2_state=noise_eval_Xattn2_hidden_state)\n\n            ### ----------- FORWARD normal VLPT batch\n            with th.cuda.stream(s1):\n                eval_VLPT_pd_action, _, _, eval_VPT_state, eval_LM_state, _, eval_Xattn1_hidden_state, eval_Xattn2_hidden_state = policy.get_output_for_observations(\n                    ob_words=x_words,\n                    ob_frames=eval_batch_frames,\n                    VPT_state=eval_VPT_state,\n                    LM_state=eval_LM_state,\n                    LM_labels=None,\n                    first=eval_is_first_frame.clone(),\n                    Xattn1_state=eval_Xattn1_hidden_state,\n                    Xattn2_state=eval_Xattn2_hidden_state)\n            th.cuda.synchronize()\n            th.cuda.empty_cache()\n\n            eval_VLPT_pd_action['buttons'] = eval_VLPT_pd_action['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n            eval_VLPT_pd_action['camera'] = eval_VLPT_pd_action['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n\n            noise_eval_VLPT_pd_action['buttons'] = noise_eval_VLPT_pd_action['buttons'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 8641])\n            noise_eval_VLPT_pd_action['camera'] = noise_eval_VLPT_pd_action['camera'].reshape([EVAL_BATCH_SIZE*F_SEQ_LEN, 1, 1, 121])\n\n            # calculate loss\n            eloss = -policy.get_logprob_of_action(eval_VLPT_pd_action, eval_action_labels).mean().item()\n            eval_VPT_loss += eloss\n            noise_eval_VPT_loss += -policy.get_logprob_of_action( noise_eval_VLPT_pd_action, eval_action_labels).mean().item()\n            print(\"BC_EVAL: batch done!\", eval_video_group_id, eval_subseq_ids, eloss)\n\n        eval_VPT_loss /= num_batch\n        noise_eval_VPT_loss /= num_batch\n\n        # return VLPT to training mode ## enable dropout for appropriate layers\n        policy.net.eval()\n        policy.net.LM.transformer.layers[-1].pos_ff.CoreNet[-1].train() # enable dropout between LM and Xattn2\n        policy.net.Xattn_LM_VPT.train() # enable dropout inside Xattn2 and before VPT transformer layers 2-4\n        for i in [1,2,3]:\n            policy.net.recurrent_layer.blocks[i].train() # add dropout to all VPT layers being trained (2-4)\n        policy.net.lastlayer.train()\n        policy.net.final_ln.train() # these last two don't even have dropout, but i believe in magic\n\n        print(eval_VPT_loss, noise_eval_VPT_loss)\n        return eval_VPT_loss, 
noise_eval_VPT_loss\n \n\n # agent estimate 10 video sequence batches of 512 with same tgt_len and mem_len\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n VPT_train()","repo_name":"GaPaLa/VLPT","sub_path":"VLPT/TRAIN_behavioural_cloning.py","file_name":"TRAIN_behavioural_cloning.py","file_ext":"py","file_size_in_byte":35317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
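The load/save helpers in the training script above all follow one pattern: recurrent state is cached per video id in a plain dict, stacked into a batch tensor on load, and written back as detached clones after each step. A minimal self-contained sketch of that pattern, using a small `GRU` stand-in (shapes and the model are illustrative, not the VLPT internals):

```python
import torch as th

HIDDEN = 8

def load_states(video_ids, cache, device="cpu"):
    # Stack cached per-video states into one batch tensor; unseen videos get zeros.
    out = th.zeros(1, len(video_ids), HIDDEN, device=device)
    for b, vid in enumerate(video_ids):
        if vid in cache:
            out[:, b] = cache[vid]
    return out

def save_states(video_ids, state, cache):
    # Store detached clones so the graph is cut and later in-place ops
    # cannot mutate states that are still cached.
    for b, vid in enumerate(video_ids):
        cache[vid] = state[:, b].detach().clone()

rnn = th.nn.GRU(4, HIDDEN)
cache = {}
for batch_videos in [["a", "b"], ["b", "c"]]:   # "b" keeps its state across batches
    h0 = load_states(batch_videos, cache)
    x = th.randn(5, len(batch_videos), 4)       # (T, B, E) dummy frames
    _, hT = rnn(x, h0)
    save_states(batch_videos, hT, cache)
```

The `detach().clone()` on save is the important part: it is what lets truncated BPTT carry state between batches without backpropagating through the whole video.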
+{"seq_id":"23617044951","text":"#!/usr/bin/python\n\nimport sys\nimport re\n\nlines = []\nwith open(sys.argv[1], 'r') as FILE:\n lines = FILE.readlines()\n\ncases = []\nfor line in lines[1:]:\n line = line.split()\n C = int(line[0])\n D = int(line[C + 1])\n cases.append((line[1:C + 1], line[C + 2:C + 2 + D], line[-1]))\n\ncasenumber = 0\nfor case in cases:\n casenumber += 1\n res = ''\n\n combs = {}\n for c in case[0]:\n combs[c[:2]] = c[-1]\n combs[c[:2][::-1]] = c[-1]\n\n ops = []\n for o in case[1]:\n for i in range(2): ops.append(re.compile(o[i] + '.*' + o[1 - i]))\n\n for elem in case[2]:\n res += elem\n\n while (len(res) > 1) and (res[-2:] in combs):\n res = res[:-2] + combs[res[-2:]]\n\n if len(res) > 1:\n for op in ops:\n if op.search(res):\n res = ''\n break\n\n res = str(list(res)).replace(\"'\", '')\n print(\"Case #{0}: {1}\".format(casenumber, res))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/185.py","file_name":"185.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37210701717","text":"import math\nimport pandas as pd\nimport pyro\nimport pyro.distributions as dist\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pyro.infer import SVI, Trace_ELBO\nfrom tqdm import trange\n\n\"\"\"\nThis port to Pyro doesn't work, the topics are not coherent...\n\n\"\"\"\n\n\nclass Encoder(nn.Module):\n def __init__(self, vocab_size, num_topics, hidden, dropout):\n super().__init__()\n self.drop = nn.Dropout(dropout)\n self.fc1 = nn.Linear(vocab_size, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fcmu = nn.Linear(hidden, num_topics)\n self.fclv = nn.Linear(hidden, num_topics)\n self.bnmu = nn.BatchNorm1d(num_topics)\n self.bnlv = nn.BatchNorm1d(num_topics)\n\n def forward(self, inputs):\n h = F.softplus(self.fc1(inputs))\n h = F.softplus(self.fc2(h))\n h = self.drop(h)\n theta_loc = self.bnmu(self.fcmu(h))\n theta_scale = self.bnlv(self.fclv(h))\n return theta_loc, theta_scale\n\n\nclass Decoder(nn.Module):\n def __init__(self, vocab_size, num_topics, dropout):\n super().__init__()\n self.beta = nn.Linear(num_topics, vocab_size)\n self.bn = nn.BatchNorm1d(vocab_size)\n self.drop = nn.Dropout(dropout)\n\n def forward(self, inputs):\n inputs = self.drop(inputs)\n return F.log_softmax(self.bn(self.beta(inputs)), dim=1)\n\n\nclass ProdLDA(nn.Module):\n def __init__(self, vocab_size, num_topics, hidden, dropout, device):\n super().__init__()\n self.vocab_size = vocab_size\n self.num_topics = num_topics\n self.inference_net = Encoder(vocab_size, num_topics, hidden, dropout)\n self.recognition_net = Decoder(vocab_size, num_topics, dropout)\n self.device = device\n\n def model(self, doc_sum=None):\n # register PyTorch module `decoder` with Pyro\n pyro.module(\"recognition_net\", self.recognition_net)\n with pyro.plate(\"documents\", doc_sum.shape[0]):\n # setup hyperparameters\n theta_loc = doc_sum.new_zeros((doc_sum.shape[0], self.num_topics))\n theta_scale = doc_sum.new_ones((doc_sum.shape[0], self.num_topics))\n # sample from prior (value will be sampled by guide\n # when computing the ELBO)\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, (0.5 * theta_scale).exp()).to_event(1))\n theta = theta / theta.sum(1, keepdim=True)\n\n count_param = self.recognition_net(theta)\n pyro.sample(\n 'obs',\n dist.Multinomial(doc_sum.shape[1], count_param).to_event(1),\n obs=doc_sum\n )\n\n def guide(self, doc_sum=None):\n # Use an amortized guide for local variables.\n pyro.module(\"inference_net\", self.inference_net)\n with pyro.plate(\"documents\", doc_sum.shape[0]):\n theta_loc, theta_scale = self.inference_net(doc_sum)\n pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, (0.5 * theta_scale).exp()).to_event(1))\n\n def beta(self):\n return self.recognition_net.beta.weight.cpu().detach().T\n\n\ndef train(device, doc_sum, batch_size, learning_rate, num_epochs):\n # clear param store\n pyro.clear_param_store()\n\n prodLDA = ProdLDA(\n vocab_size=doc_sum.shape[1],\n num_topics=100,\n hidden=100,\n dropout=0.2,\n device=device\n )\n prodLDA.to(device)\n\n optimizer = pyro.optim.Adam({\"lr\": learning_rate})\n svi = SVI(prodLDA.model, prodLDA.guide, optimizer, loss=Trace_ELBO())\n num_batches = int(math.ceil(doc_sum.shape[0] / batch_size))\n\n bar = trange(num_epochs)\n for epoch in bar:\n running_loss = 0.0\n\n # Iterate over data.\n for i in range(num_batches):\n batch_doc_sum = doc_sum[i * batch_size:(i + 1) * batch_size, :]\n loss = svi.step(batch_doc_sum)\n running_loss += loss / batch_doc_sum.size(0)\n\n epoch_loss = 
running_loss / doc_sum.shape[0]\n bar.set_postfix(epoch_loss='{:.2f}'.format(epoch_loss))\n\n return prodLDA\n\n\nif __name__ == '__main__':\n # The data used is the pre-processed AP corpus from David Blei's website:\n # http://www.cs.columbia.edu/~blei/lda-c/\n # (the pre-processing code is not included for simplification)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n doc_sum = torch.load('doc_sum_ap.pt').float().to(device)\n trained_model = train(device, doc_sum, 32, 1e-3, 80)\n\n beta = trained_model.beta()\n torch.save(beta, 'betas.pt')\n\n # Print topics' top words\n vocab = pd.read_csv('../input/prodlda/vocab.csv')\n for i in range(beta.shape[0]):\n sorted_, indices = torch.sort(beta[i], descending=True)\n df = pd.DataFrame(indices[:20].numpy(), columns=['index'])\n print(pd.merge(df, vocab[['index', 'word']], how='left', on='index')['word'].values)\n print()\n","repo_name":"ucals/prodlda_stuck","sub_path":"prodlda_port_to_pyro.py","file_name":"prodlda_port_to_pyro.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
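The module docstring above flags that this port yields incoherent topics. Two things worth checking against Pyro's reference ProdLDA implementation, offered as hypotheses rather than verified fixes: (1) the reference draws log-theta from a Normal and pushes it through a softmax (a logistic-normal), rather than normalizing a LogNormal draw; (2) `Decoder.forward` returns `log_softmax` output, i.e. logits, but the model passes it positionally to `Multinomial`, where it is treated as `probs` (which must be non-negative). A sketch of `model`/`guide` with both changes, reusing the class's existing networks:

```python
import pyro
import pyro.distributions as dist
import torch.nn.functional as F

def model(self, doc_sum=None):
    pyro.module("recognition_net", self.recognition_net)
    with pyro.plate("documents", doc_sum.shape[0]):
        loc = doc_sum.new_zeros((doc_sum.shape[0], self.num_topics))
        scale = doc_sum.new_ones((doc_sum.shape[0], self.num_topics))
        # logistic-normal prior: Normal draw pushed through softmax
        logtheta = pyro.sample("logtheta", dist.Normal(loc, scale).to_event(1))
        theta = F.softmax(logtheta, -1)
        count_param = self.recognition_net(theta)   # log-probabilities over vocab
        total_count = int(doc_sum.sum(-1).max())
        pyro.sample("obs", dist.Multinomial(total_count, logits=count_param),
                    obs=doc_sum)

def guide(self, doc_sum=None):
    pyro.module("inference_net", self.inference_net)
    with pyro.plate("documents", doc_sum.shape[0]):
        loc, logvar = self.inference_net(doc_sum)
        # interpret the encoder's second head as a log-variance, as the
        # original guide does
        pyro.sample("logtheta", dist.Normal(loc, (0.5 * logvar).exp()).to_event(1))
```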
+{"seq_id":"32061455293","text":"import numpy as np \nimport scipy.io.wavfile as wav\nimport matplotlib.pyplot as plt\n\ns=np.random.uniform(-1,1,499)\nn=np.arange(0,499,1)\nx1=np.sin(0.1*np.pi*n)\nx=np.sin(0.1*np.pi*n)+s\nplt.stem(n,x1)\nplt.show()\n\nplt.stem(n,x)\nplt.show()\nk=np.arange(-499,498)\nac=sig.correlate(x,x)\nplt.plot(k,ac)\nplt.show()\n\ni=np.ones(500)\n\nk1=np.arange(-499,499)\ncc=sig.correlate(s,i)\nplt.plot(k1,cc)\nplt.show()\n\n","repo_name":"yashbee313839/DSP-Lab","sub_path":"Time domain representation of LTI systems/autocorrelation.py","file_name":"autocorrelation.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41191933933","text":"import FWCore.ParameterSet.Config as cms\n\nisolEcalPixelTrackFilter = cms.EDFilter('HLTEcalPixelIsolTrackFilter',\n saveTags = cms.bool(True),\n candTag = cms.InputTag('hltIsolEcalPixelTrackProd'),\n MaxEnergyInEB = cms.double(2),\n MaxEnergyInEE = cms.double(4),\n MaxEnergyOutEB = cms.double(1.2),\n MaxEnergyOutEE = cms.double(2),\n NMaxTrackCandidates = cms.int32(10),\n DropMultiL2Event = cms.bool(False),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"HLTrigger/special/isolEcalPixelTrackFilter_cfi.py","file_name":"isolEcalPixelTrackFilter_cfi.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10182332159","text":"from keras.models import Sequential, Model, model_from_json, load_model\nfrom keras.layers import Conv1D, MaxPool1D, Dropout, Flatten, Dense, Activation, Add, Input, AveragePooling1D, Concatenate, Lambda\nfrom keras.callbacks import ModelCheckpoint,CSVLogger,EarlyStopping\nfrom keras.losses import binary_crossentropy, categorical_crossentropy\nfrom keras import backend as K\nfrom keras.optimizers import SGD,Adam\n\ndef residual_block(x, filters, conv_num=3, activation=\"relu\"):\n s = Conv1D(filters, 1, padding=\"same\")(x)\n for i in range(conv_num - 1):\n x = Conv1D(filters, 3, padding=\"same\")(x)\n x = Activation(activation)(x)\n x = Conv1D(filters, 3, padding=\"same\")(x)\n x = Add()([x, s])\n x = Activation(activation)(x)\n return MaxPool1D(pool_size=2, strides=2)(x)\n\ndef create_base_model(input_shape, embeddings,num_classes):\n inputs = keras.layers.Input(shape=input_shape, name=\"input\")\n\n x = residual_block(inputs, 16, 2)\n x = residual_block(x, 32, 2)\n x = residual_block(x, 64, 3)\n x = residual_block(x, 128, 3)\n x = residual_block(x, 128, 3)\n\n x = AveragePooling1D(pool_size=3, strides=3)(x)\n x = Flatten()(x)\n x = Dense(256, activation=\"relu\")(x)\n x = Dense(units= embeddings, activation=\"relu\",name='embedding')(x)\n\n outputs = Dense(num_classes, activation=\"softmax\", name=\"output\")(x)\n\n base_model=Model(inputs, outputs)\n base_model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=\"Adam\",\n metrics=['accuracy'])\n return base_model\n","repo_name":"dboursinos/Efficient-Probability-Intervals-Classification-Inductive-Venn-Predictors","sub_path":"dynamic_taxonomies/Ecobee_Thermostat/cnn_1D.py","file_name":"cnn_1D.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39525285187","text":"def genrange(start, end):\n while start <= end:\n yield start\n start += 1\n\n\nnumber = (el for el in range(1, 11))\nprint(list(number))\nprint(type(number))\n\nnumbers = genrange(1, 10)\nprint(list(numbers))\nprint(type(numbers))\n","repo_name":"StivnNkolov/SoftUni-Python","sub_path":"Python-OOP/14-Iterators-and-Generators-lab/05-Generator-Range.py","file_name":"05-Generator-Range.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"38525982466","text":"from deposit_gui.view.vmdiarea_frames.abstract_mdiarea_frame import AbstractMDIAreaFrame\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.relation_frame import RelationFrame\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_table import QueryTabTable\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_images import (QueryTabImagesLazy, QueryTabImages)\nfrom deposit_gui.view.vmdiarea_frames.query_frame_elements.query_tab_graph import (QueryTabGraphLazy, QueryTabGraph)\n\nfrom PySide2 import (QtWidgets, QtCore, QtGui)\n\nclass QueryFrame(AbstractMDIAreaFrame, QtWidgets.QFrame):\n\t\n\tsignal_query_selected = QtCore.Signal(list)\t\t# [QueryItem, ...]\n\tsignal_query_activated = QtCore.Signal(object)\t# QueryItem\n\tsignal_object_selected = QtCore.Signal(list)\t# [DObject, ...]\n\tsignal_relation_selected = QtCore.Signal(list)\t# [(Source, Target, label), ...]\n\tsignal_add_object = QtCore.Signal(object)\t# Query\n\tsignal_del_object = QtCore.Signal()\n\tsignal_del_descriptor = QtCore.Signal()\n\tsignal_edited = QtCore.Signal(object, object)\t# QueryItem, value\n\tsignal_drop_url = QtCore.Signal(object, str)\t# QueryItem, url\n\t\n\t# signal_class_link = QtCore.Signal(str)\t\t\t\t# class_name\n\t# signal_relation_link = QtCore.Signal(int, str, str)\t# obj_id, rel_label, class_name\n\t# signal_relation_unlink = QtCore.Signal(int, str, str)\t# obj_id, rel_label, class_name\n\t\n\t\n\tINITIAL_THUMBNAIL_SIZE = 128\n\t\n\tdef __init__(self, query, cmodel, cview):\n\t\t\n\t\tAbstractMDIAreaFrame.__init__(self)\n\t\tQtWidgets.QFrame.__init__(self)\n\t\t\n\t\tself._query = query\n\t\tself._cmodel = cmodel\n\t\tself._cview = cview\n\t\t\n\t\tself.setLayout(QtWidgets.QVBoxLayout())\n\t\tself.layout().setContentsMargins(0, 0, 0, 0)\n\t\tself.layout().setSpacing(0)\n\t\t\n\t\tself.relation_frame = RelationFrame()\n\t\tself.relation_frame.signal_object_link.connect(self.on_object_link)\n\t\tself.signal_class_link = self.relation_frame.signal_class_link\n\t\tself.signal_relation_link = self.relation_frame.signal_relation_link\n\t\tself.signal_relation_unlink = self.relation_frame.signal_relation_unlink\n\t\t\n\t\tself.footer = QueryFooter(self)\n\t\t\n\t\tself.tab_table = QueryTabTable(self)\n\t\tself.tab_images = QueryTabImagesLazy(self)\n\t\tself.tab_graph = QueryTabGraphLazy(self)\n\t\t\n\t\tself.tabs = QtWidgets.QTabWidget()\n\t\tself.tabs.addTab(self.tab_table, \"Table\")\n\t\tself.tabs.addTab(self.tab_images, \"Images\")\n\t\tself.tabs.addTab(self.tab_graph, \"Graph\")\n\t\t\n\t\tsplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)\n\t\tsplitter.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\t\tself.layout().addWidget(splitter)\n\t\t\n\t\tframe_left = QtWidgets.QFrame()\n\t\tframe_left.setLayout(QtWidgets.QVBoxLayout())\n\t\tframe_left.layout().setContentsMargins(0, 0, 0, 0)\n\t\tframe_left.layout().setSpacing(0)\n\t\tframe_left.layout().addWidget(self.tabs)\n\t\tframe_left.layout().addWidget(self.footer)\n\n\t\tself.scroll_area = QtWidgets.QScrollArea()\n\t\tself.scroll_area.setWidgetResizable(True)\n\t\tself.scroll_area.setFrameStyle(QtWidgets.QFrame.NoFrame)\n\t\tself.scroll_area.setWidget(self.relation_frame)\n\t\t\n\t\tsplitter.addWidget(frame_left)\n\t\tsplitter.addWidget(self.scroll_area)\n\t\t\n\t\tself._filter_timer = 
QtCore.QTimer()\n\t\tself._filter_timer.setSingleShot(True)\n\t\tself._filter_timer.timeout.connect(self.on_filter_timer)\n\t\t\n\t\tself.footer.set_object_buttons_enabled(self._query.main_class != \"*\")\n\t\tself.footer.set_del_object_enabled(False)\n\t\t\n\t\tself.tabs.currentChanged.connect(self.on_tab_changed)\n\t\t\n\t\tself.update_count()\n\t\n\tdef title(self):\n\t\t\n\t\treturn self._query.querystr\n\t\n\tdef icon(self):\n\t\t\n\t\treturn \"dep_cube.svg\"\n\t\n\tdef update_query(self, objects = None, classes = None):\n\t\t\n\t\tif ((objects is None) and (classes is None)) or \\\n\t\t\t(\"*\" in self._query.classes) or \\\n\t\t\t(self._query.main_class is None) or \\\n\t\t\tset(self._query.classes).intersection([\n\t\t\t\tcls if isinstance(cls, str) else cls.name for cls in classes\n\t\t\t]) or self._query.objects.intersection([\n\t\t\t\tobj if isinstance(obj, int) else obj.id for obj in objects\n\t\t\t]):\n\t\t\t\n\t\t\tcurrent_index = self.tabs.currentIndex()\n\t\t\tif current_index > 0:\n\t\t\t\tself.tabs.setCurrentIndex(0)\n\t\t\t\n\t\t\tself._query.process()\n\t\t\tself._cview.progress.stop()\n\t\t\t\n\t\t\tself.tab_table.update_query()\n\t\t\tself.relation_frame.populate()\n\t\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\t\tself.tab_images.update_query(\n\t\t\t\t\tself.tab_table.get_images(), self.tab_table.get_item_order()\n\t\t\t\t)\n\t\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\t\tself.tab_graph.update_query()\n\t\t\t\n\t\t\tif current_index > 0:\n\t\t\t\tself.tabs.setCurrentIndex(current_index)\n\t\n\tdef select_all(self):\n\t\t\n\t\tself.get_current_tab().selectAll()\n\t\n\tdef clear_selection(self):\n\t\t\n\t\tself.get_current_tab().clearSelection()\n\t\n\tdef populate_tab_images(self):\n\t\t\n\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\treturn\n\t\t\n\t\tself.tab_images = QueryTabImages(self, self.tab_table.get_images(), self.tab_table.get_item_order(), self._cmodel)\n\t\tself.tab_images.set_thumbnail_size(self.INITIAL_THUMBNAIL_SIZE)\n\t\tself.tabs.blockSignals(True)\n\t\tself.tabs.insertTab(1, self.tab_images, \"Images\")\n\t\tself.tabs.removeTab(2)\n\t\tself.tabs.setCurrentIndex(1)\n\t\tself.tabs.blockSignals(False)\n\t\n\tdef populate_tab_graph(self):\n\t\t\n\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\treturn\n\t\tself.tab_graph = QueryTabGraph(self, set([self._cmodel.get_object(obj_id) for obj_id in self.tab_table.get_obj_ids()]))\n\t\tself.tabs.blockSignals(True)\n\t\tself.tabs.insertTab(2, self.tab_graph, \"Graph\")\n\t\tself.tabs.removeTab(3)\n\t\tself.tabs.setCurrentIndex(2)\n\t\tself.tabs.blockSignals(False)\n\t\n\tdef get_current_tab(self):\n\t\t\n\t\treturn [self.tab_table, self.tab_images, self.tab_graph][self.tabs.currentIndex()]\n\t\n\tdef update_count(self):\n\t\t\n\t\tself.footer.set_count(self.get_current_tab().get_row_count())\n\t\n\tdef get_header(self, col, user_role = False):\n\t\t# pass to deposit.AbstractExternalsource to provide header data from QueryTabTable\n\t\t\n\t\treturn self.tab_table._table_model.headerData(col, QtCore.Qt.Horizontal, QtCore.Qt.UserRole if user_role else QtCore.Qt.DisplayRole)\n\t\n\tdef get_item(self, row, col):\n\t\t# pass to deposit.AbstractExternalsource to provide data from QueryTabTable\n\t\t\n\t\treturn self.tab_table._table_model.index(row, col).data(QtCore.Qt.UserRole)\n\t\n\tdef get_row_count(self):\n\t\t\n\t\treturn self.tab_table.get_row_count()\n\t\n\tdef get_column_count(self):\n\t\t\n\t\treturn 
self.tab_table.get_column_count()\n\t\n\t@QtCore.Slot(int)\n\tdef on_tab_changed(self, index):\n\t\t\n\t\tif index == 1:\n\t\t\tself.populate_tab_images()\n\t\telif index == 2:\n\t\t\tself.populate_tab_graph()\n\t\t\n\t\tself.update_count()\n\t\tself.get_current_tab().on_selected()\n\t\n\t@QtCore.Slot(int)\n\tdef on_zoom(self, value):\n\t\t\n\t\tself.tab_images.set_thumbnail_size(value)\n\t\n\t@QtCore.Slot()\n\tdef on_filter(self):\n\t\t\n\t\tself._filter_timer.start(1000)\n\t\n\t@QtCore.Slot()\n\tdef on_filter_timer(self):\n\t\t\n\t\tself.tab_table.apply_filter(self.footer.get_filter_text())\n\t\tif isinstance(self.tab_images, QueryTabImages):\n\t\t\tself.tab_images.apply_filter(self.tab_table.get_item_order())\n\t\tif isinstance(self.tab_graph, QueryTabGraph):\n\t\t\tself.tab_graph.apply_filter(set([self._cmodel.get_object(obj_id) for obj_id in self.tab_table.get_obj_ids()]))\n\t\tself.update_count()\n\t\n\t@QtCore.Slot()\n\tdef on_sorted(self):\n\t\t\n\t\tself.tab_images.sort(self.tab_table.get_item_order())\n\t\n\t@QtCore.Slot()\n\tdef on_add_object(self):\n\t\t\n\t\tself.signal_add_object.emit(self._query)\n\t\n\t@QtCore.Slot()\n\tdef on_del_object(self):\n\t\t\n\t\tself.signal_del_object.emit()\n\t\n\t@QtCore.Slot()\n\tdef on_del_descriptor(self):\n\t\t\n\t\tself.signal_del_descriptor.emit()\n\t\n\t@QtCore.Slot(int)\n\tdef on_to_object(self, obj_id):\n\t\t\n\t\tif obj_id is None:\n\t\t\treturn\n\t\tself.tabs.setCurrentIndex(0)\n\t\tself.tab_table.select_object(obj_id)\n\t\n\t@QtCore.Slot(int)\n\tdef on_object_link(self, obj_id):\n\t\t\n\t\tself.on_to_object(obj_id)\n\t\n\tdef on_query_activated(self, item):\n\t\t\n\t\tself.signal_query_activated.emit(item)\n\t\n\tdef on_query_selected(self, items):\n\t\t\n\t\tself.signal_query_selected.emit(items)\n\t\t\n\t\thas_descriptor = False\n\t\tfor item in items:\n\t\t\tif (item.obj_id is not None) and (item.value is not None):\n\t\t\t\thas_descriptor = True\n\t\t\t\tbreak\n\t\tfound = False\n\t\tfor item in items:\n\t\t\tif item.obj_id is not None:\n\t\t\t\tself.relation_frame.populate(self._cmodel.get_object(item.obj_id))\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tif not found:\n\t\t\tself.relation_frame.populate()\n\t\t\n\t\tself.footer.set_del_descriptor_enabled(has_descriptor)\n\t\n\tdef on_object_activated(self, obj_id):\n\t\t\n\t\tself.on_to_object(obj_id)\n\t\n\tdef on_object_selected(self, object_ids):\n\t\t\n\t\tobjects = [self._cmodel.get_object(obj_id) for obj_id in object_ids]\n\t\tobj = None\n\t\tif len(objects) == 1:\n\t\t\tobj = objects[0]\n\t\tself.relation_frame.populate()\n\t\tself.signal_object_selected.emit(objects)\n\t\n\tdef on_relation_selected(self, relations):\n\t\t\n\t\tself.signal_relation_selected.emit([(self._cmodel.get_object(source), self._cmodel.get_object(target), label) for source, target, label in relations])\n\t\n\tdef on_selected_rows(self, row_items):\n\t\t\n\t\tself.footer.set_del_object_enabled(len(row_items) > 0)\n\t\n\tdef on_edited(self, item, value):\n\t\t\n\t\tself.signal_edited.emit(item, value)\n\t\n\tdef on_drop_url(self, item, url):\n\t\t\n\t\tself.signal_drop_url.emit(item, url)\n\t\n\tdef on_deactivate(self):\n\t\t\n\t\tself.tab_table.clearSelection()\n\t\tself.tab_images.clearSelection()\n\t\tself.tab_graph.deselect_all()\n\t\n\tdef on_close(self):\n\t\t\n\t\tself._filter_timer.stop()\n\t\tself.tab_table.on_close()\n\t\tself.tab_images.on_close()\n\t\tself.tab_graph.on_close()\n\nclass QueryFooter(QtWidgets.QFrame):\n\n\tdef __init__(self, 
queryframe):\n\n\t\tQtWidgets.QFrame.__init__(self)\n\t\t\n\t\tself._queryframe = queryframe\n\t\tself._count_text = None\n\t\t\n\t\tself.setFrameShape(QtWidgets.QFrame.StyledPanel)\n\t\tself.setFrameShadow(QtWidgets.QFrame.Raised)\n\n\t\tself.setLayout(QtWidgets.QGridLayout())\n\t\tself.layout().setContentsMargins(5, 0, 0, 0)\n\t\tself.layout().setSpacing(0)\n\t\t\n\n\t\tself.add_object_button = QtWidgets.QToolButton()\n\t\tself.add_object_button.setIcon(self._queryframe.get_icon(\"add_object.svg\"))\n\t\tself.add_object_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.add_object_button.setAutoRaise(True)\n\t\tself.add_object_button.setToolTip(\"Add Object\")\n\t\tself.add_object_button.setContentsMargins(0, 0, 0, 0)\n\t\tself.add_object_button.clicked.connect(self._queryframe.on_add_object)\n\t\tself.layout().addWidget(self.add_object_button, 0, 0)\n\t\t\n\t\tself.del_object_button = QtWidgets.QToolButton()\n\t\tself.del_object_button.setIcon(self._queryframe.get_icon(\"remove_object.svg\"))\n\t\tself.del_object_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.del_object_button.setAutoRaise(True)\n\t\tself.del_object_button.setToolTip(\"Remove Object\")\n\t\tself.del_object_button.setContentsMargins(0, 0, 0, 0)\n\t\tself.del_object_button.clicked.connect(self._queryframe.on_del_object)\n\t\tself.layout().addWidget(self.del_object_button, 0, 1)\n\t\t\n\t\tself.del_descriptor_button = QtWidgets.QToolButton()\n\t\tself.del_descriptor_button.setIcon(self._queryframe.get_icon(\"remove_descriptor.svg\"))\n\t\tself.del_descriptor_button.setIconSize(QtCore.QSize(24, 24))\n\t\tself.del_descriptor_button.setAutoRaise(True)\n\t\tself.del_descriptor_button.setToolTip(\"Remove Descriptor\")\n\t\tself.del_descriptor_button.setContentsMargins(0, 0, 5, 0)\n\t\tself.del_descriptor_button.clicked.connect(self._queryframe.on_del_descriptor)\n\t\tself.layout().addWidget(self.del_descriptor_button, 0, 2)\n\t\t\n\t\tfilter_label = QtWidgets.QLabel(\"Filter:\")\n\t\tfilter_label.setContentsMargins(5, 0, 0, 0)\n\t\tself.layout().addWidget(filter_label, 0, 3)\n\n\t\tself._filter_edit = QtWidgets.QLineEdit()\n\t\tself._filter_edit.setContentsMargins(5, 0, 5, 0)\n\t\tself._filter_edit.textEdited.connect(self._queryframe.on_filter)\n\t\tself.layout().addWidget(self._filter_edit, 0, 4)\n\n\t\tself._count_label = QtWidgets.QLabel(\"Found: %s\")\n\t\tself._count_label.setContentsMargins(0, 0, 5, 0)\n\t\tself.layout().addWidget(self._count_label, 0, 5)\n\t\t\n\t\tself._count_text = self._count_label.text()\n\t\n\tdef get_filter_text(self):\n\t\t\n\t\treturn self._filter_edit.text()\n\t\n\tdef set_count(self, count):\n\n\t\tself._count_label.setText(self._count_text % (count))\n\t\n\tdef set_object_buttons_enabled(self, state):\n\n\t\tself.add_object_button.setVisible(state)\n\t\tself.del_object_button.setVisible(state)\n\t\n\tdef set_del_object_enabled(self, state):\n\t\t\n\t\tself.del_object_button.setEnabled(state)\n\t\n\tdef set_del_descriptor_enabled(self, state):\n\t\t\n\t\tself.del_descriptor_button.setEnabled(state)\n\n","repo_name":"demjanp/deposit_gui","sub_path":"src/deposit_gui/view/vmdiarea_frames/query_frame.py","file_name":"query_frame.py","file_ext":"py","file_size_in_byte":12273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
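One detail worth calling out in the `QueryFrame` above: filtering is debounced. Each keystroke restarts a single-shot `QTimer`, so the potentially expensive filter only runs once typing has paused for a second. The pattern in isolation, as a minimal PySide2 widget:

```python
from PySide2 import QtWidgets, QtCore

class DebouncedFilter(QtWidgets.QLineEdit):
    def __init__(self):
        super().__init__()
        self._timer = QtCore.QTimer()
        self._timer.setSingleShot(True)           # fire once per quiet period
        self._timer.timeout.connect(self._apply)
        # every edit restarts the 1 s countdown
        self.textEdited.connect(lambda _: self._timer.start(1000))

    def _apply(self):
        print("filtering on:", self.text())       # expensive work goes here

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    w = DebouncedFilter()
    w.show()
    app.exec_()
```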
+{"seq_id":"71098380993","text":"from AthenaConfiguration.ComponentFactory import CompFactory \nfrom AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.MainServicesConfig import MainServicesCfg\nfrom LArCalibProcessing.utils import FolderTagResolver\nfrom IOVDbSvc.IOVDbSvcConfig import addFolders\nfrom copy import deepcopy\n\ndef _ofcAlg(flags,postfix,folderSuffix,nPhases,dPhases,nDelays,nColl):\n result=ComponentAccumulator()\n LArPhysOFCAlg = CompFactory.LArOFCAlg(\"LArOFCPhysAlg_\"+postfix)\n LArPhysOFCAlg.isSC = flags.LArCalib.isSC\n LArPhysOFCAlg.ReadCaliWave = False\n LArPhysOFCAlg.KeyList = [ \"LArPhysWave\" ]\n LArPhysOFCAlg.Normalize = flags.LArCalib.OFC.Normalize\n LArPhysOFCAlg.Verify = True\n LArPhysOFCAlg.GroupingType = flags.LArCalib.GroupingType\n LArPhysOFCAlg.Nphase = nPhases\n LArPhysOFCAlg.Dphase = dPhases\n LArPhysOFCAlg.Ndelay = nDelays\n LArPhysOFCAlg.Nsample = flags.LArCalib.OFC.Nsamples\n LArPhysOFCAlg.FillShape = True\n LArPhysOFCAlg.TimeShift = False\n LArPhysOFCAlg.AddTimeOffset = -1.0*12.0#TimeShiftGuardRegion\n LArPhysOFCAlg.LArPhysWaveBinKey = \"LArPhysWaveShift\"\n LArPhysOFCAlg.UseDelta = flags.LArCalib.OFC.useDelta\n LArPhysOFCAlg.KeyOFC = \"LArOFC_\"+postfix\n LArPhysOFCAlg.KeyShape = \"LArShape_\"+postfix\n \n if flags.LArCalib.OFC.Nsamples==4 and not flags.LArCalib.isSC:\n LArPhysOFCAlg.ReadDSPConfig = True\n LArPhysOFCAlg.DSPConfigFolder = \"/LAR/Configuration/DSPConfiguration\"\n \n\n LArPhysOFCAlg.DecoderTool = CompFactory.LArAutoCorrDecoderTool(UseAlwaysHighGain=flags.LArCalib.PhysACuseHG,\n isSC = flags.LArCalib.isSC)\n\n result.addEventAlgo(LArPhysOFCAlg)\n if (nColl>0):\n #create a copy to calculate pile-up OFCs\n LArPhysOFCAlgMu=deepcopy(LArPhysOFCAlg)\n LArPhysOFCAlgMu.name=LArPhysOFCAlg.name+\"_mu\"\n LArPhysOFCAlgMu.KeyOFC = \"LArOFC_\"+postfix+\"_mu\"\n LArPhysOFCAlg.KeyShape = \"LArShape_\"+postfix+\"_mu\"\n LArPhysOFCAlgMu.DecoderTool = CompFactory.LArAutoCorrDecoderTool(DecodeMode=1,\n UseAlwaysHighGain=flags.LArCalib.PhysACuseHG,\n isSC = flags.LArCalib.isSC,\n KeyAutoCorr=\"LArPhysAutoCorr\")\n result.addEventAlgo(LArPhysOFCAlgMu)\n \n \n rootfile=flags.LArCalib.Output.ROOTFile\n if rootfile != \"\":\n bcKey = \"LArBadChannelSC\" if flags.LArCalib.isSC else \"LArBadChannel\" \n OFC2Ntup=CompFactory.LArOFC2Ntuple(\"LArOFC2Ntuple_\"+postfix)\n OFC2Ntup.ContainerKey = \"LArOFC_\"+postfix\n OFC2Ntup.NtupleName = \"OFC_\"+postfix\n OFC2Ntup.AddFEBTempInfo = False \n OFC2Ntup.isSC = flags.LArCalib.isSC\n OFC2Ntup.BadChanKey = bcKey\n result.addEventAlgo(OFC2Ntup)\n\n if (nColl>0):\n OFC2NtupMu=CompFactory.LArOFC2Ntuple(\"LArOFC2Ntuple_\"+postfix+\"_mu\")\n OFC2NtupMu.ContainerKey = \"LArOFC_\"+postfix+\"_mu\"\n OFC2NtupMu.NtupleName = \"OFC_\"+postfix+\"_mu\"\n OFC2NtupMu.AddFEBTempInfo = False \n OFC2NtupMu.isSC = flags.LArCalib.isSC\n OFC2NtupMu.BadChanKey = bcKey\n result.addEventAlgo(OFC2NtupMu)\n \n\n Shape2Ntup=CompFactory.LArShape2Ntuple(\"LArShape2Ntuple_\"+postfix)\n Shape2Ntup.ContainerKey=\"LArShape_\"+postfix\n Shape2Ntup.NtupleName=\"SHAPE_\"+postfix\n Shape2Ntup.AddFEBTempInfo = False\n Shape2Ntup.isSC = flags.LArCalib.isSC\n Shape2Ntup.BadChanKey = bcKey\n result.addEventAlgo(Shape2Ntup)\n\n\n objList=[\"LArOFCComplete#LArOFC_\"+postfix+\"#\"+flags.LArCalib.OFCPhys.Folder+folderSuffix,\n \"LArShapeComplete#LArShape_\"+postfix+\"#\"+flags.LArCalib.Shape.Folder+folderSuffix]\n\n rs=FolderTagResolver()\n OFCTag=rs.getFolderTag(flags.LArCalib.OFCPhys.Folder+folderSuffix)\n 
ShapeTag=rs.getFolderTag(flags.LArCalib.Shape.Folder+folderSuffix)\n tagList=[OFCTag,ShapeTag]\n\n if nColl > 0:\n objList+=[\"LArOFCComplete#LArOFC_\"+postfix+\"_mu#\"+flags.LArCalib.OFCPhys.Folder+folderSuffix]\n tagstr=rs.getFolderTag(flags.LArCalib.OFCPhys.Folder+folderSuffix)\n tagpref=tagstr[0:tagstr.find(folderSuffix)+len(folderSuffix)]\n tagpost=tagstr[tagstr.find(folderSuffix)+len(folderSuffix):]\n nc=int(nColl)\n OFCTagmu=f'{tagpref}-mu-{nc}{tagpost}' \n tagList.append(OFCTagmu)\n del rs #Close database\n\n from RegistrationServices.OutputConditionsAlgConfig import OutputConditionsAlgCfg\n result.merge(OutputConditionsAlgCfg(flags,name=\"OutCondAlg\",\n outputFile=flags.LArCalib.Output.POOLFile,\n ObjectList=objList,\n IOVTagList=tagList,\n Run1=flags.LArCalib.IOVStart,\n Run2=flags.LArCalib.IOVEnd\n ))\n\n return result\n\n\ndef LArOFCPhysCfg(flags,loadPhysAC=True):\n\n #Get basic services and cond-algos\n from LArCalibProcessing.LArCalibBaseConfig import LArCalibBaseCfg,chanSelStr\n result=LArCalibBaseCfg(flags)\n\n nColl=flags.LArCalib.OFC.Ncoll\n from LArCalibProcessing.utils import FolderTagResolver\n FolderTagResolver._globalTag=flags.IOVDb.GlobalTag\n rs=FolderTagResolver()\n PhysWaveTag=rs.getFolderTag(flags.LArCalib.PhysWave.Folder)\n AutoCorrTag=rs.getFolderTag(flags.LArCalib.AutoCorr.Folder)\n PhysAutoCorrTag= rs.getFolderTag(flags.LArCalib.PhysAutoCorr.Folder)\n if (nColl>0):\n #Insert mu in tag-name:\n elems=PhysAutoCorrTag.split(\"-\")\n PhysAutoCorrTag=\"-\".join([elems[0]+\"_mu_%i\"%nColl,]+elems[1:])\n del elems\n\n PhysCaliTdiffTag=rs.getFolderTag(flags.LArCalib.PhysCaliTdiff.Folder)\n del rs #Close database\n\n result.merge(addFolders(flags,flags.LArCalib.PhysWave.Folder,detDb=flags.LArCalib.Input.Database, tag=PhysWaveTag, modifiers=chanSelStr(flags)))\n result.merge(addFolders(flags,flags.LArCalib.AutoCorr.Folder,detDb=flags.LArCalib.Input.Database, tag=AutoCorrTag, modifiers=chanSelStr(flags)))\n if loadPhysAC:\n result.merge(addFolders(flags,flags.LArCalib.PhysAutoCorr.Folder,detDb=flags.LArCalib.Input.Database, tag=PhysAutoCorrTag,modifiers=chanSelStr(flags)))\n\n \n \n result.merge(addFolders(flags,\"/LAR/ElecCalibOfl/OFCBin/PhysWaveShifts\",\"LAR_OFL\",tag=\"LARElecCalibOflOFCBinPhysWaveShifts-UPD3-00\"))\n result.merge(addFolders(flags,\"/LAR/Configuration/DSPConfiguration\",\"LAR_ONL\"))\n\n if (flags.LArCalib.OFC.UsePhysCalibTDiff):\n result.merge(addFolders(flags,flags.LArCalib.PhysCaliTdiff.Folder,detDb=\"LAR_OFL\", db=\"COMP200\", tag=PhysCaliTdiffTag))\n\n\n #def _ofcAlg(flags,postfix,folderSuffix,nPhases,dPhases,nDelays,nColl):\n if not loadPhysAC:\n #post-processing mode, fix SG key to allow subsequent OFC-phase picking\n key1=\"_unpicked\"\n else:\n key1=\"\"\n\n result.merge(_ofcAlg(flags,\"3ns%s\"%key1,\"%isamples3bins17phases\"%flags.LArCalib.OFC.Nsamples,nPhases=8,dPhases=3,nDelays=24,nColl=nColl))\n result.merge(_ofcAlg(flags,\"1ns\",\"%isamples%s\"%(flags.LArCalib.OFC.Nsamples,key1),nPhases=24,dPhases=1,nDelays=24,nColl=nColl))\n\n #RegistrationSvc \n result.addService(CompFactory.IOVRegistrationSvc(RecreateFolders = False))\n result.getService(\"IOVDbSvc\").DBInstance=\"\"\n\n #Ntuple writing\n rootfile=flags.LArCalib.Output.ROOTFile\n if rootfile != \"\":\n import os\n if os.path.exists(rootfile):\n os.remove(rootfile)\n result.addService(CompFactory.NTupleSvc(Output = [ \"FILE1 DATAFILE='\"+rootfile+\"' OPT='NEW'\" ]))\n result.setAppProperty(\"HistogramPersistency\",\"ROOT\")\n pass # end if ROOT ntuple writing\n\n\n #MC Event 
selector since we have no input data file\n from McEventSelector.McEventSelectorConfig import McEventSelectorCfg\n result.merge(McEventSelectorCfg(flags,\n RunNumber = flags.LArCalib.Input.RunNumbers[0],\n EventsPerRun = 1,\n FirstEvent\t = 1,\n InitialTimeStamp = 0,\n TimeStampInterval = 1))\n\n from PerfMonComps.PerfMonCompsConfig import PerfMonMTSvcCfg\n result.merge(PerfMonMTSvcCfg(flags))\n \n return result\n\n\n\nif __name__ == \"__main__\":\n\n\n from AthenaConfiguration.AllConfigFlags import initConfigFlags\n ConfigFlags=initConfigFlags()\n from LArCalibProcessing.LArCalibConfigFlags import addLArCalibFlags\n addLArCalibFlags(ConfigFlags)\n\n ConfigFlags.Input.Files=[]\n ConfigFlags.LArCalib.Input.RunNumbers=[500000]\n ConfigFlags.LArCalib.Input.Database=\"/home/wlampl/calibTest/00403758_00403761_00403762_EndCap-EMB-EMEC_HIGH_40_21.0.20_1/poolFiles/myDB200_00403758_00403761_00403762_EB-EMECC_one.db\"\n ConfigFlags.LArCalib.Input.SubDet=\"EM\"\n ConfigFlags.LArCalib.BadChannelDB=\"/home/wlampl/calibTest/00403758_00403761_00403762_EndCap-EMB-EMEC_HIGH_40_21.0.20_1/poolFiles/SnapshotBadChannel_00403758_00403761_00403762_EB-EMECC.db\"\n ConfigFlags.LArCalib.OFC.Ncoll=20\n ConfigFlags.LArCalib.BadChannelTag=\"-RUN2-UPD3-00\"\n ConfigFlags.LArCalib.Output.ROOTFile=\"larofc.root\"\n ConfigFlags.IOVDb.DatabaseInstance=\"CONDBR2\"\n ConfigFlags.IOVDb.DBConnection=\"sqlite://;schema=output.sqlite;dbname=CONDBR2\"\n ConfigFlags.IOVDb.GlobalTag=\"LARCALIB-RUN2-02\"\n ConfigFlags.GeoModel.AtlasVersion=\"ATLAS-R3S-2021-03-00-00\"\n ConfigFlags.IOVDb.DatabaseInstance=\"CONDBR2\"\n ConfigFlags.LAr.doAlign=False\n ConfigFlags.Input.RunNumber=ConfigFlags.LArCalib.Input.RunNumbers[0]\n #ConfigFlags.Exec.OutputLevel=1\n ConfigFlags.fillFromArgs()\n ConfigFlags.lock()\n cfg=MainServicesCfg(ConfigFlags)\n cfg.merge(LArOFCPhysCfg(ConfigFlags))\n\n print(cfg.getService(\"IOVDbSvc\").Folders)\n print(\"Start running...\")\n cfg.run(1)\n","repo_name":"Yusuf-Manjra/athena","sub_path":"LArCalorimeter/LArExample/LArCalibProcessing/python/LArCalib_OFCPhysConfig.py","file_name":"LArCalib_OFCPhysConfig.py","file_ext":"py","file_size_in_byte":10151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30715115674","text":"class ValorVctr:\n def __init__(self,i,valor):\n self.i = i\n self.valor = valor\n\nclass ValorMtrz:\n def __init__(self,i,j,valor):\n self.i = i\n self.j = j\n self.valor = valor\n\nclass Robot:\n def __init__(self,i,nombre,tipo,capacidad = 0):\n self.i = i\n self.nombre = nombre\n self.tipo = tipo\n self.capacidad = capacidad\n\nclass UMilitar:\n def __init__(self,i,fila,columna,capacidad):\n self.i = i\n self.fila = fila\n self.columna = columna\n self.capacidad = capacidad\n\nclass NCiudad:\n def __init__(self,i,filas,columnas,nombre,mapa,uMilitar):\n self.i = i\n self.filas = filas\n self.columnas = columnas\n self.nombre = nombre\n self.mapa = mapa\n self.uMilitar = uMilitar","repo_name":"bryan967132/IPC2_Proyecto2_201908355","sub_path":"Programa/Constructores.py","file_name":"Constructores.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29463652353","text":"import pandas as pd\nimport re\nfrom tqdm.notebook import tqdm\nimport snscrape.modules.twitter as sntwitter \nimport pymysql\nfrom datetime import datetime, timedelta\nimport schedule\nimport time\nimport query \nimport classify_with_model\nimport preprocessing\n\n# 단어 리스트\nsearch_keyword_list = {\n '기타': ['드라퍼', '브액', '아이스드랍', '주사기', '클럽약', '텔레', '파티약', '허브', '후리베이스'],\n '대마': ['대마초', '사티바', '인디카', '합성대마', '해시시'],\n '메스암페타민': ['사끼', '차가운술', '작대기', '떨액', '크리스탈', '삥두', '시원한술', '필로폰', '아이스술', '히로뽕', '액상떨', '아이스', '북한산아이스', '빙두', '찬술', '샤부'],\n '사일로시빈': ['마법의버섯', '환각버섯'],\n '아편': ['스틸녹스', '신의눈물', '에토미데이트', '옥시코돈', '졸피뎀', '트라마돌'],\n '알킬니트라이트': ['러쉬파퍼', '랏슈', '정글주스'],\n '케타민': ['캔디케이', '케타민'],\n '코카인': ['서울코카인', '충북코카인', '충남코카인', '강원코카인', '경기코카인', '전북코카인', '전남코카인', '경북코카인', '경남코카인', '제주코카인', '강남코카인', '부산코카인', '인천코카인'],\n 'GHB': ['물뽕', '발정제', '최음제'],\n 'LSD': ['엘에스디'],\n 'MDMA': ['엑스터시', '서울도리도리', '충북도리도리', '충남도리도리', '강원도리도리', '경기도리도리', '전북도리도리', '전남도리도리', '경북도리도리', '경남도리도리', '제주도리도리', '강남도리도리', '부산도리도리', '인천도리도리', '서울몰리', '충북몰리', '충남몰리', '강원몰리', '경기몰리', '전북몰리', '전남몰리', '경북몰리', '경남몰리', '제주몰리', '강남몰리', '부산몰리', '인천몰리']\n }\n \nexcept_words = ['허브맛', '허브맛쿠키', '허브솔트', '스파허브', '아이허브', '미국', '대회', 'F1', '유아인', '휘성', '검찰', '해시브라운', '시간', '웃어', '웃으', '시시해', \\\n '에프엑스', 'fx', '정수정', '크리스탈라이트', '제시카', \\\n '아이스베어', '아이스탕후루', '아이스만주', '아이스만쥬', '아메리카노', '얼죽아', '블랙아이스', '아이스크림', '초코', '커피', '카페', '아이스께끼', '찰떡', '아이스티', '겨울', '라떼', '에스프레소', '하키', '팝업', '주문', '당첨', '블렌드', '블렌디드', '바닐라', '헤이즐넛', '모찌', '케이크', '음료', '콜드브루', '프라푸치노', '엔시티', '스톰', '아이스맨', '매브', '매버릭', \\\n '남경필', '한서희', '브레이킹 배드', '돈스파이크', \\\n '브레이킹 배드', \\\n '샤브샤브', '샤브', \\\n '오마이걸 유빈', \\\n 'PD수첩', '히어로물뽕', '홍준표', '돼지', \\\n '몰리면', '홀리몰리', '홀리 몰리', '과카몰리', '몰리게', '내몰리', '몰리는', '미스몰리', \\\n '엑스토시움', \\\n '유아인', '허성태', '코카인댄스', \\\n '머쉬룸 스프', '머쉬룸스프', '수프', '버거', '파스타', '맛집', '표고버섯', '치즈', '피자', \\\n '양지원', \\\n '의사', '병원', '처방받', '처방 받', '졸피뎀과 나', '처벌', '구속', '불면', \\\n '정글쥬스', \\\n '전두환 손자', '전우원', '돈스파이크', '유아인', \\\n '병원','여드름','뾰루지','얼굴','흉터','흉','상처',\\\n '라이브','북클럽','콘서트','팬미팅','팬클럽','공연','대리', '음향', '춤', '50억', '비리', '수사', '대리티켓팅', '50억클럽', '멜론', '수작', '냄새', '웹툰', '게임'\n ]\n\n#####################################################################################\n\ndef crawl_for_period(\n type: str,\n search_query: str,\n start_date: str,\n end_date: str,\n except_words: list # 제외어 리스트 \n ):\n \n query = str(search_query) + \" since:\" + str(start_date) + \" until:\" + str(end_date)\n print(f\"검색 query: {query}\")\n\n # 트위터 데이터 저장할 리스트\n tweets_list = []\n \n for i, tweet in (enumerate(sntwitter.TwitterSearchScraper(query).get_items())): \n # 수집할 데이터 컬럼\n data = [\n type, \n search_query,\n tweet.date, \n tweet.id,\n tweet.user.username,\n tweet.user.displayname,\n tweet.place,\n tweet.user.location,\n tweet.content, \n tweet.likeCount,\n tweet.retweetCount, \n tweet.viewCount,\n tweet.hashtags,\n tweet.media, \n tweet.sourceLabel\n ]\n\n # 트윗 내용에 제외어 하나라도 포함시 제외하기\n if any(words in tweet.content for words in except_words):\n continue\n \n # 리트윗 데이터는 제외하기 (ex. 
@username)\n        regex = re.compile(\"@[\\w_]+\")\n        if regex.search(tweet.content):\n            continue\n\n        # preprocess tweet.content\n        data[8] = preprocessing.preprocessing_data(tweet.content)\n\n        # convert the media and hashtag lists to strings before saving\n        if isinstance(data[-2], list):\n            data[-2] = str(data[-2])\n        if isinstance(data[-3], list):\n            data[-3] = str(data[-3])\n\n        # run the classification model on the content\n        pred_label, prediction, _ = classify_with_model.test_sentences(data[8])\n\n        # keep the tweet only if it is classified as a drug-sale post\n        if pred_label == 1:\n            data.append(pred_label)\n            data.append(prediction)\n            tweets_list.append(data)\n    print(f'[{search_query}] number of tweets classified for this keyword: {len(tweets_list)}')\n    return tweets_list\n\ndef search_twitter():\n    print(f'{datetime.today()} job started')\n\n    # keep only 3 days of data in the DB: clear out the existing rows first\n    query.delete_from_db('DELETE FROM classified')\n    search_query = ''\n\n    # track the last 3 days of data\n    start_date= (datetime.today() -timedelta(3)).strftime(\"%Y-%m-%d\")\n    end_date = datetime.today().strftime(\"%Y-%m-%d\")\n\n    # crawl and store keyword by keyword\n    for type1, type2 in search_keyword_list.items():\n        for t in type2:\n            print(f'type1: {type1}, type2: {t}')\n            search_query = t\n            tweets_list = crawl_for_period(type1, search_query, start_date, end_date, except_words)\n            # save to the MySQL DB\n            if len(tweets_list) > 0:\n                query.save_to_db(tweets_list)\n                print(f\"\\n ------------------- saved {len(tweets_list)} posts in total -------------------\\n\\n\")\n\n# run main\nsearch_twitter()\n","repo_name":"pastel-blue/2023_dscap","sub_path":"Prototype/crawler/main_crawler.py","file_name":"main_crawler.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
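The crawler above imports `schedule` and `time` but never uses them, which suggests periodic runs were intended; the script currently calls `search_twitter()` once and exits. A sketch of wiring the existing function into a daily schedule (my assumption about the intended behaviour, not something the original does):

```python
import schedule
import time

schedule.every().day.at("09:00").do(search_twitter)  # re-crawl the 3-day window daily
while True:
    schedule.run_pending()
    time.sleep(60)
```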
+{"seq_id":"24691258733","text":"# -*- coding:utf-8 -*-\nimport socket\nimport pyaudio\nimport Dynaknock\n\nCHUNK = 512\nRATE = 44100\n\n\ndef create_server_socket(port):\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_sock.bind(('', port))\n server_sock.listen(256)\n print('Server Run Port:{}'.format(port))\n return server_sock\n\n\ndef accept_loop(server_sock):\n print('Ready For Accept')\n new_sock, (remote_host, remote_remport) = server_sock.accept()\n return new_sock\n\n\ndef create_audio_stream(chunk, rate):\n p = pyaudio.PyAudio()\n stream = p.open(\n format=pyaudio.paInt16,\n channels=1,\n rate=rate,\n input=True,\n output=False,\n frames_per_buffer=chunk\n )\n return stream\n\n\nif __name__ == \"__main__\":\n\n # Launch Socket Server\n server_sock = create_server_socket(7777)\n\n # waiting connection from client\n sock = accept_loop(server_sock)\n\n # create audio stream\n stream = create_audio_stream(CHUNK, RATE)\n\n # start detection\n analyzer = Dynaknock.Analyzer(stream, sock)\n analyzer.start_detection()","repo_name":"Hikaru-Ito/DynaKnock","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1011855850","text":"#for normalizing csv files\nfrom soupclass8 import C_sort, r_csv\n\n\n\nclass Csv_norm(object):\n\tdef __init__(self, fname, datatypes):\n\t\tself.fname = fname\n\t\tself.dataypes = datatypes\n\t\torig_csv = C_sort(fname)\n\t\tcontents = self.orig_csv.contents\n\n\tdef main(self, fname):\n\t\tpass\n\n\tdef int_test(x, repl=1337):\n\t\tfor i in range(0, len(x)):\n\t\t\ttry:\n\t\t\t\tint(x[i])\n\t\t\texcept ValueError as VE:\n\t\t\t\tx[i] = repl\n\t\treturn x\n\tdef date_test(x, repl='1337-01-01'):\n\t\tfor i in range(0, len(x)):\n\t\t\ttry:\n\t\t\t\tx[i].split('-')\n\t\t\texcept SyntaxError as SE:\n\t\t\t\tx[i] = repl\n\t\t\telse:\n\t\t\t\tif len(x[i].split('-')) != 3 and '/' in x[i]:\n\t\t\t\t\tx[i] = re.sub('/', '-', x[i])\n\n\t\treturn x\n\n\n\n\n\n\n\t\t#examines list and tests for ","repo_name":"awaddell77/MySQL-DB-Management","sub_path":"csv_norm.py","file_name":"csv_norm.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21733638698","text":"import os.path as op\nimport numpy as np\nimport pytest\n\nimport fsl_mrs.utils.mrs_io as mrsio\nfrom fsl_mrs.utils.fitting import fit_FSLModel\nimport fsl_mrs.utils.quantify as quant\nfrom fsl_mrs.utils.constants import STANDARD_T1, STANDARD_T2\n\nmetabfile = op.join(op.dirname(__file__), 'testdata/quantify/Cr_10mM_test_water_scaling_WS.txt')\nh2ofile = op.join(op.dirname(__file__), 'testdata/quantify/Cr_10mM_test_water_scaling_nWS.txt')\nbasisfile = op.join(op.dirname(__file__), 'testdata/quantify/basisset_JMRUI')\n\n\ndef test_QuantificationInfo():\n qci = quant.QuantificationInfo(0.000, 40, ['Cr', 'NAA'], 298)\n assert qci.relax_corr_water_molal > 55500\n assert qci.relax_corr_water_molar > 55500\n\n qci = quant.QuantificationInfo(0.000, 40, ['Cr', 'NAA'], 127)\n assert qci.relax_corr_water_molal > 55500\n assert qci.relax_corr_water_molar > 55500\n\n qci = quant.QuantificationInfo(0.010, 3, ['Cr', 'NAA'], 127)\n t2s = STANDARD_T2['3T']\n t1s = STANDARD_T1['3T']\n assert np.isclose(qci.R_H2O_WM, np.exp(-0.010 / t2s['H2O_WM']) * (1 - np.exp(-3 / t1s['H2O_WM'])))\n assert np.isclose(qci.R_H2O_GM, np.exp(-0.010 / t2s['H2O_GM']) * (1 - np.exp(-3 / t1s['H2O_GM'])))\n assert np.isclose(qci.R_H2O_CSF, np.exp(-0.010 / t2s['H2O_CSF']) * (1 - np.exp(-3 / t1s['H2O_CSF'])))\n\n qci = quant.QuantificationInfo(0.010, 3, ['Cr', 'NAA'], 298)\n t2s = STANDARD_T2['7T']\n t1s = STANDARD_T1['7T']\n assert np.isclose(qci.R_H2O_WM, np.exp(-0.010 / t2s['H2O_WM']) * (1 - np.exp(-3 / t1s['H2O_WM'])))\n assert np.isclose(qci.R_H2O_GM, np.exp(-0.010 / t2s['H2O_GM']) * (1 - np.exp(-3 / t1s['H2O_GM'])))\n assert np.isclose(qci.R_H2O_CSF, np.exp(-0.010 / t2s['H2O_CSF']) * (1 - np.exp(-3 / t1s['H2O_CSF'])))\n\n assert qci.ref_metab == 'Cr'\n assert qci.ref_protons == 5\n assert qci.ref_limits == (2, 5)\n\n qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n assert qci.ref_metab == 'NAA'\n assert qci.ref_protons == 3\n assert qci.ref_limits == (1.8, 2.2)\n\n qci.set_fractions({'GM': 0.45, 'WM': 0.45, 'CSF': 0.1})\n assert qci._fractions is not None\n\n assert np.isclose(qci.csf_corr, 1 / 0.9)\n\n qci.add_corr = 5.0\n assert qci.add_corr == 5.0\n\n\ndef test_volumefraction_calc():\n qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n qci.set_fractions({'GM': 0.45, 'WM': 0.40, 'CSF': 0.15})\n assert qci.f_GM == 0.45\n assert qci.f_WM == 0.40\n assert qci.f_CSF == 0.15\n\n with pytest.warns(UserWarning):\n qci.set_fractions({'GM': 0.49, 'WM': 0.49, 'CSF': 0.0})\n\n assert qci.f_GM == 0.5\n assert qci.f_WM == 0.5\n assert qci.f_CSF == 0.0\n\n with pytest.raises(ValueError) as exc_info:\n qci.set_fractions({'GM': 0.44, 'WM': 0.40, 'CSF': 0.05})\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"fractions must be a dict containing 'WM', 'GM', 'CSF' keys\"\\\n \", and must sum to 1. 
Currently they are:\"\\\n \" {'GM': 0.44, 'WM': 0.4, 'CSF': 0.05} (sum=0.8900).\"\n\n\ndef test_molefraction_calc():\n qci = quant.QuantificationInfo(0.010, 3, ['NAA'], 298)\n qci.set_fractions({'GM': 0.45, 'WM': 0.40, 'CSF': 0.15})\n\n # Densitites are 'GM': 0.78, 'WM': 0.65, 'CSF': 0.97\n sum_frac = (0.45 * 0.78 + 0.40 * 0.65 + 0.15 * 0.97)\n assert np.isclose(qci.f_GM_H2O, 0.45 * 0.78 / sum_frac)\n assert np.isclose(qci.f_WM_H2O, 0.40 * 0.65 / sum_frac)\n assert np.isclose(qci.f_CSF_H2O, 0.15 * 0.97 / sum_frac)\n\n\ndef test_corrected_water_conc():\n # No relaxation\n qci = quant.QuantificationInfo(1E-10, 1E5, ['NAA'], 298)\n qci.set_fractions({'GM': 1.00, 'WM': 0.0, 'CSF': 0.0})\n\n print(qci.relax_corr_water_molal)\n print(qci.relax_corr_water_molar)\n # Molality should be close to pure water as density term cancels\n assert np.isclose(qci.relax_corr_water_molal, 55510)\n # Molarity should be scaled by density term as volume fixed\n assert np.isclose(qci.relax_corr_water_molar, 55510 * 0.78)\n\n qci.set_fractions({'GM': 0.50, 'WM': 0.5, 'CSF': 0.0})\n\n print(qci.relax_corr_water_molal)\n print(qci.relax_corr_water_molar)\n # Molality should be close to pure water as density term cancels\n assert np.isclose(qci.relax_corr_water_molal, 55510)\n # Molarity should be scaled by density terms as volume fixed\n assert np.isclose(qci.relax_corr_water_molar, 55510 * (0.78 + 0.65) / 2)\n\n qci = quant.QuantificationInfo(1E-10, 1, ['NAA'], 298)\n qci.set_fractions({'GM': 0.50, 'WM': 0.5, 'CSF': 0.0})\n\n print(qci.relax_corr_water_molal)\n print(qci.relax_corr_water_molar)\n # Molality should scaled by relaxation terms in proportion of mole fraction.\n mf_gm = 0.5 * 0.78 / (0.5 * 0.78 + 0.5 * 0.65)\n mf_wm = 0.5 * 0.65 / (0.5 * 0.78 + 0.5 * 0.65)\n assert np.isclose(qci.relax_corr_water_molal, 55510 * (qci.R_H2O_GM * mf_gm + qci.R_H2O_WM * mf_wm))\n # Molarity should be scaled by density terms * relaxation terms\n assert np.isclose(qci.relax_corr_water_molar, 55510 * (0.78 * qci.R_H2O_GM + 0.65 * qci.R_H2O_WM) / 2)\n\n\ndef test_quantifyWater():\n basis = mrsio.read_basis(basisfile)\n data = mrsio.read_FID(metabfile)\n dataw = mrsio.read_FID(h2ofile)\n\n mrs = data.mrs(basis=basis,\n ref_data=dataw)\n mrs.keep = ['Cr']\n mrs.check_FID(repair=True)\n mrs.check_Basis(repair=True)\n\n Fitargs = {'ppmlim': [0.2, 5.2],\n 'method': 'MH',\n 'baseline_order': 0,\n 'metab_groups': [0]}\n\n res = fit_FSLModel(mrs, **Fitargs)\n\n tissueFractions = {'GM': 0.6, 'WM': 0.4, 'CSF': 0.0}\n TE = 0.03\n TR = 20\n T2dict = {'H2O_GM': 0.110,\n 'H2O_WM': 0.080,\n 'H2O_CSF': 2.55,\n 'METAB': 0.160}\n\n q_info = quant.QuantificationInfo(\n TE,\n TR,\n mrs.names,\n mrs.centralFrequency / 1E6,\n t2=T2dict)\n\n q_info.set_fractions(tissueFractions)\n\n res.calculateConcScaling(mrs,\n q_info,\n internal_reference=['Cr'],\n verbose=True)\n\n print(res.getConc(scaling='raw'))\n print(res.getConc(scaling='internal'))\n print(res.getConc(scaling='molality'))\n print(res.getConc(scaling='molarity'))\n\n assert np.allclose(res.getConc(scaling='internal'), 1.0)\n assert np.allclose(res.getConc(scaling='molarity'), 10.78, atol=3E-1)\n assert np.allclose(res.getConc(scaling='molality'), 10.78 * 1 / (0.6 * 0.78 + 0.4 * 0.65), atol=3E-1)\n","repo_name":"wtclarke/fsl_mrs","sub_path":"fsl_mrs/tests/test_utils_quantify.py","file_name":"test_utils_quantify.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
+{"seq_id":"31327771183","text":"from turtle import Screen, Turtle\r\nimport pandas as pd\r\n\r\n\r\nscreen = Screen()\r\nturtle = Turtle()\r\n\r\n\r\nscreen.setup(width=750, height=1000)\r\nscreen.title(\"Philippine Provinces Game\")\r\n\r\nimage = \"Philippines_administrative_map_blank1.gif\"\r\nscreen.addshape(image)\r\n\r\nturtle.shape(image)\r\n\r\nx_click = 0\r\ny_click = 0\r\n\r\n\r\ndef get_coor():\r\n screen.onscreenclick(modify_variable)\r\n\r\n\r\ndef modify_variable(raw_x, raw_y):\r\n global x_click\r\n global y_click\r\n\r\n x_click = int(raw_x // 1)\r\n y_click = int(raw_y // 1)\r\n\r\n file = pd.DataFrame([x_click, y_click], index=[\"x\", \"y\"])\r\n\r\n # print(x_click)\r\n # print(y_click)\r\n print(file)\r\n\r\n\r\nget_coor()\r\nscreen.mainloop()\r\n","repo_name":"mikeme09/Guess-the-PH-Provinces-Game","sub_path":"get_coordinates.py","file_name":"get_coordinates.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41638126988","text":"import pytest\nimport numpy as np\nimport openpnm as op\nimport numpy.testing as nt\n\n\nclass TransientImplicitReactiveTransportTest:\n\n def setup_class(self):\n np.random.seed(0)\n self.net = op.network.Cubic(shape=[3, 3, 1], spacing=1e-6)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.geo['pore.volume'] = 1e-12\n self.phase = op.phases.GenericPhase(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n self.phys['pore.A'] = -1e-13\n self.phys['pore.k'] = 2\n self.phys['throat.diffusive_conductance'] = 1e-12\n mod = op.models.physics.generic_source_term.standard_kinetics\n self.phys.add_model(propname='pore.reaction',\n model=mod,\n prefactor='pore.A',\n exponent='pore.k',\n X='pore.concentration',\n regen_mode='deferred')\n self.settings = {'conductance': 'throat.diffusive_conductance',\n 'quantity': 'pore.concentration'}\n self.alg = op.algorithms.TransientReactiveTransport(network=self.net,\n phase=self.phase,\n settings=self.settings)\n self.alg.setup(quantity='pore.concentration',\n conductance='throat.diffusive_conductance',\n t_initial=0, t_final=1, t_step=0.1, t_tolerance=1e-7,\n t_precision=10, rxn_tolerance=1e-6)\n self.alg.set_value_BC(pores=self.net.pores('front'), values=2)\n self.alg.set_source(propname='pore.reaction',\n pores=self.net.pores('back'))\n self.alg.set_IC(0)\n\n def test_transient_implicit_reactive_transport(self):\n self.alg.setup(t_scheme='implicit')\n self.alg.run()\n x = [2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_transient_cranknicolson_reactive_transport(self):\n self.alg.setup(t_scheme='cranknicolson')\n self.alg.run()\n x = [2., 0.97167537, 0.4209642,\n 2., 0.97167537, 0.4209642,\n 2., 0.97167537, 0.4209642]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_transient_reactive_transport_output_times(self):\n self.alg.setup(t_output=[0, 0.5, 0.7, 1])\n self.alg.run()\n times = [\"pore.concentration@0\",\n \"pore.concentration@5e-1\",\n \"pore.concentration@7e-1\",\n \"pore.concentration@1\"]\n assert set(times).issubset(self.alg.keys())\n\n def test_transient_reactive_transport_results(self):\n times_total = [\"pore.concentration@0\",\n \"pore.concentration@5e-1\",\n \"pore.concentration@7e-1\",\n \"pore.concentration@1\"]\n results_total = set(self.alg.results(steps=None).keys())\n results_total.discard(\"pore.concentration\")\n assert set(times_total) == results_total\n times_partial = [\"pore.concentration@5e-1\",\n \"pore.concentration@1\"]\n results_partial = set(self.alg.results(times=[0.5, 1]).keys())\n results_partial.discard(\"pore.concentration\")\n assert set(times_partial) == results_partial\n\n def test_transient_steady_mode_reactive_transport(self):\n self.alg.setup(t_scheme=\"steady\")\n self.alg.run()\n x = [2, 1.76556357, 1.53112766,\n 2, 1.76556357, 1.53112766,\n 2, 1.76556357, 1.53112766]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n self.alg.run()\n\n def test_consecutive_runs_preserves_solution(self):\n self.alg.setup(t_scheme='implicit')\n self.alg.run()\n x = [2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096,\n 2, 0.95029957, 0.41910096]\n y = self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n self.alg.run()\n y = 
self.alg[\"pore.concentration\"]\n nt.assert_allclose(y, x, rtol=1e-5)\n\n def test_adding_bc_over_sources(self):\n with pytest.raises(Exception):\n self.alg.set_value_BC(pores=self.net.pores(\"right\"), values=0.3)\n\n def test_adding_sources_over_bc(self):\n with pytest.raises(Exception):\n self.alg.set_source(propname='pore.reaction',\n pores=self.net.pores('left'))\n\n def test_ensure_settings_are_valid(self):\n alg = op.algorithms.TransientReactiveTransport(network=self.net,\n phase=self.phase)\n with pytest.raises(Exception, match=r\".*quantity.*\"):\n alg.run()\n alg.settings['quantity'] = 'pore.concentration'\n with pytest.raises(Exception, match=r\".*conductance.*\"):\n alg.run()\n alg.settings['conductance'] = 'throat.conductance'\n with pytest.raises(Exception):\n alg.run()\n\n def teardown_class(self):\n ws = op.Workspace()\n ws.clear()\n\n\nif __name__ == '__main__':\n\n t = TransientImplicitReactiveTransportTest()\n t.setup_class()\n self = t\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n","repo_name":"halotudio/openPNM-copy2","sub_path":"tests/unit/algorithms/TransientReactiveTransportTest.py","file_name":"TransientReactiveTransportTest.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28771001505","text":"import mock\n\nclass IutilTest(mock.TestCase):\n def setUp(self):\n self.setupModules(\n ['_isys', 'logging', 'pyanaconda.anaconda_log', 'block'])\n\n import pyanaconda\n pyanaconda.anaconda_log = mock.Mock()\n\n def tearDown(self):\n self.tearDownModules()\n\n def copy_to_sysimage_test(self):\n from pyanaconda import iutil\n fs = mock.DiskIO()\n self.take_over_io(fs, iutil)\n self.assertEqual(iutil.copy_to_sysimage(\"/etc/securetty\"), False)\n\n fs[\"/etc/securetty\"] = \"tty1\"\n iutil.os.makedirs = mock.Mock()\n iutil.shutil.copy = mock.Mock()\n self.assertEqual(iutil.copy_to_sysimage(\"/etc/securetty\"), True)\n iutil.os.makedirs.assert_called_with(\"/mnt/sysimage/etc\")\n iutil.shutil.copy.assert_called_with(\"/etc/securetty\",\n \"/mnt/sysimage/etc/securetty\")\n","repo_name":"mattias-ohlsson/anaconda","sub_path":"tests/pyanaconda_test/iutil_test.py","file_name":"iutil_test.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"21929933444","text":"import tuned.logs\n\n__all__ = [\"PluginLoader\"]\n\nlog = tuned.logs.get()\n\nclass PluginLoader(object):\n\t__slots__ = [\"_namespace\", \"_prefix\", \"_interface\"]\n\n\tdef _set_loader_parameters(self):\n\t\t\"\"\"\n\t\tThis method has to be implemented in child class and should\n\t\tset _namespace, _prefix, and _interface member attributes.\n\t\t\"\"\"\n\t\traise NotImplementedError()\n\n\tdef __init__(self):\n\t\tsuper(PluginLoader, self).__init__()\n\n\t\tself._set_loader_parameters()\n\t\tassert type(self._namespace) is str\n\t\tassert type(self._prefix) is str\n\t\tassert type(self._interface) is type and issubclass(self._interface, object)\n\n\tdef load_plugin(self, plugin_name):\n\t\tassert type(plugin_name) is str\n\t\tmodule_name = \"%s.%s%s\" % (self._namespace, self._prefix, plugin_name)\n\t\treturn self._get_class(module_name)\n\n\tdef _get_class(self, module_name):\n\t\tlog.debug(\"loading module %s\" % module_name)\n\t\tmodule = __import__(module_name)\n\t\tpath = module_name.split(\".\")\n\t\tpath.pop(0)\n\n\t\twhile len(path) > 0:\n\t\t\tmodule = getattr(module, path.pop(0))\n\n\t\tfor name in module.__dict__:\n\t\t\tcls = getattr(module, name)\n\t\t\tif type(cls) is type and issubclass(cls, self._interface):\n\t\t\t\treturn cls\n\n\t\traise ImportError(\"Cannot find the plugin class.\")\n","repo_name":"edwardbadboy/tuned-ubuntu","sub_path":"tuned/utils/plugin_loader.py","file_name":"plugin_loader.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"61"}
+{"seq_id":"7641568043","text":"import os\nimport math\nimport PIL.Image\nimport PIL.ImageDraw\nimport pyttsx3\nimport time\nimport tkinter as tk\nfrom tkinter import *\nimport torch\nimport torchvision.transforms as transforms\nfrom train import SoftmaxModel\nfrom train import ConvNet\n\nWIDTH = 32 * 12 # width of the window\nHEIGHT = 32 * 6 # height of the window\n\nLINE_WIDTH = 5 # width of the pen\nLINE_RESOLUTION = 5 # minimum length of line segment (larger means more accurate angles but rougher lines)\n\nANGLE_THRESHOLD = 70 # minimum angle to begin a new phoneme\nLOOP_THRESHOLD = 2 # moving the pen this close to a coordinate already in the phoneme counts as creating a loop\nLOOP_LENGTH = 3 # minimum unwound length of a loop (in LINE_RESOLUTIONs)\n\nDATA_PATH = './data/' # default data directory\nDATA_WIDTH = 128 # width of saved training data\nDATA_HEIGHT = 128 # height of saved training data\nPHONEMES = ['ay', 'd', 'ee', 'f', 'h', 'l', 'm', 'n', 'o', 'r', 's', 't', 'v'] # possible phonemes (in data order)\n\nNET = './conv_net.pkl' # neural network to run\nDEVICE = 'cpu' # device on which to run the net\n\n# UI and phoneme recorder for Gregg recognition tool\nclass Gregg(object):\n def __init__(self):\n self.window = tk.Tk()\n self.window.title('Gregg Recognition Tool')\n\n # set up buttons\n self.frame = Frame(self.window)\n self.frame.pack(side=tk.TOP, fill='x')\n self.clear_button = Button(self.frame, text='Clear', command=self.clear)\n self.clear_button.pack(side='left')\n self.read_button = Button(self.frame, text='Read', command=self.read)\n self.read_button.pack(side='left')\n self.speak_button = Button(self.frame, text='Speak', command=self.speak)\n self.speak_button.pack(side='left')\n self.label_button = Button(self.frame, text='Label', command=self.label_word)\n self.label_button.pack(side='left')\n self.phoneme_label = Label(self.frame, text='No phonemes.')\n self.phoneme_label.pack(side='right')\n\n # set up canvas\n self.canvas = Canvas(self.window, bg='white', width=WIDTH, height=HEIGHT)\n self.canvas.pack(side=tk.BOTTOM)\n self.clear()\n\n # bind left mouse button for drawing\n self.canvas.bind('', self.mouse_down)\n self.canvas.bind('', self.mouse_move)\n self.canvas.bind('', self.add_current_phoneme)\n\n # bind hotkeys\n self.window.bind('c', self.clear)\n self.window.bind('l', self.label_word)\n\n # get the net\n self.net = torch.load(NET)\n self.net.eval()\n\n # run UI loop\n self.window.mainloop()\n\n # subtract two tuples\n def sub_tuples(self, a, b):\n return (a[0] - b[0], a[1] - b[1])\n\n # get a phoneme based on its number\n def get_phoneme(self, num):\n return PHONEMES[num]\n \n # get the phoneme images for the current word\n def get_images(self):\n images = []\n\n for index in range(len(self.phoneme_list)):\n # get the position of the phoneme\n phoneme_x = min(coords[0] for coords in self.phoneme_list[index])\n phoneme_y = min(coords[1] for coords in self.phoneme_list[index])\n phoneme_coords = (phoneme_x, phoneme_y)\n\n # construct an image of the phoneme\n phoneme_image = PIL.Image.new('RGB', (DATA_WIDTH, DATA_HEIGHT), color='white')\n phoneme_draw = PIL.ImageDraw.Draw(phoneme_image)\n first_coords = self.sub_tuples(self.phoneme_list[index][0], phoneme_coords)\n if len(self.phoneme_list[index]) == 1:\n phoneme_draw.ellipse([first_coords, (first_coords[0] + LINE_WIDTH, first_coords[1] + LINE_WIDTH)], fill='black', width=LINE_WIDTH)\n phoneme_draw.line([self.sub_tuples(coords, phoneme_coords) for coords in self.phoneme_list[index]], fill='black', 
width=LINE_WIDTH, joint='curve')\n images.append(phoneme_image)\n\n return images\n\n # clear the canvas and the current word in memory\n def clear(self, event=None):\n self.canvas.delete('all')\n\n # draw grid lines\n grid_step = HEIGHT / 4\n grid_base = HEIGHT / 4 / 2\n for x in range(8):\n self.canvas.create_line(grid_base + x * grid_step, 0, grid_base + x * grid_step, HEIGHT, fill='light gray')\n for y in range(4):\n self.canvas.create_line(0, grid_base + y * grid_step, WIDTH, grid_base + y * grid_step, fill='light gray')\n\n # clear phonemes\n self.phoneme_list = []\n self.current_phoneme = []\n self.current_phoneme\n\n # use a neural net to predict the phonemes\n def read(self):\n # get predicitons for phonemes\n images = [transforms.ToTensor()(image).unsqueeze_(0) for image in self.get_images()]\n outputs = [self.net(image) for image in images]\n self.predictions = [torch.max(output, 1) for output in outputs]\n\n # convert predictions to letters\n self.predictions = [prediction[1].item() for prediction in self.predictions]\n self.predictions = [self.get_phoneme(prediction) for prediction in self.predictions]\n if self.predictions:\n self.phoneme_label.configure(text='Phonemes: '+''.join(self.predictions))\n else:\n self.phoneme_label.configure(text='No phonemes.')\n\n # speak phonemes aloud\n def speak(self):\n self.read()\n try:\n engine = pyttsx3.init()\n engine.say(''.join(self.predictions))\n engine.runAndWait()\n except:\n print('Cannot find text to speech engine.')\n\n # create a popup window to enter training labels\n def open_label_enter(self, label_num):\n self.label_window = Toplevel(self.window)\n directions_label = Label(self.label_window, text=f'Label for phoneme {label_num}:')\n directions_label.pack(side='top')\n self.label_entry = Entry(self.label_window)\n self.label_entry.pack(side='top', fill='x')\n self.label_entry.focus_set()\n self.label_entry.bind('', lambda event, label_window=self.label_window: self.close_label_enter(label_window))\n enter_button = Button(self.label_window, text='Enter', command=lambda:self.close_label_enter(self.label_window))\n enter_button.pack(side='bottom')\n\n # close the label entry popup window\n def close_label_enter(self, window, event=None):\n self.label = self.label_entry.get()\n window.destroy()\n\n # prompt the user to enter label data for each phoneme in a word\n def label_word(self, event=None):\n images = self.get_images()\n for index in range(len(images)):\n # prompt the user for a label\n self.open_label_enter(index + 1)\n self.window.wait_window(self.label_window)\n\n # make sure the directory for the label exists\n if not os.path.exists(DATA_PATH + self.label):\n os.makedirs(DATA_PATH + self.label)\n\n # store the image in the in the directory\n filename = str(time.time()) + '.jpg'\n images[index].save(DATA_PATH + self.label + '/' + filename)\n print(f'saved {filename}')\n\n self.clear()\n\n # add the current phoneme to the phoneme list\n def add_current_phoneme(self, event=None):\n if self.current_phoneme and (not self.loop_in_phoneme or len(self.current_phoneme) > LOOP_THRESHOLD):\n self.phoneme_list.append(self.current_phoneme)\n self.current_phoneme = self.current_phoneme[-1:]\n self.draw_phonemes()\n\n # draw the current phonemes in alternating colors\n def draw_phonemes(self):\n colors = ['navy', 'blue', 'light blue']\n color_index = 0\n for phoneme in self.phoneme_list:\n color_index = (color_index + 1) % len(colors)\n for index in range(len(phoneme)):\n x_prev = phoneme[index - 1][0] if index > 0 else 
phoneme[index][0]\n y_prev = phoneme[index - 1][1] if index > 0 else phoneme[index][1]\n self.canvas.create_line(x_prev, y_prev, phoneme[index][0], phoneme[index][1],\n width=LINE_WIDTH, fill=colors[color_index],\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n\n # start a new phoneme when the mouse is pressed\n def mouse_down(self, event):\n self.current_phoneme = [(event.x, event.y)]\n\n # draw a dot in case the button is immediately lifted\n self.x_prev = event.x\n self.y_prev = event.y\n self.prev_dir = None\n self.since_loop = 0\n self.loop_in_phoneme = False\n self.canvas.create_line(event.x, event.y, event.x, event.y,\n width=LINE_WIDTH, fill='black',\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n\n # draw and record the coordinates of mouse movements\n def mouse_move(self, event):\n if abs(event.x - self.x_prev) > LINE_RESOLUTION or abs(event.y - self.y_prev) > LINE_RESOLUTION:\n # create new phoneme on a sharp bend\n dir = math.atan2((self.x_prev - event.x), (self.y_prev - event.y))\n dir = 180 / math.pi * -dir\n if self.prev_dir:\n diff = dir - self.prev_dir\n if diff > 180: diff -= 360 \n if diff < -180: diff += 360\n if abs(diff) >= ANGLE_THRESHOLD:\n # we've encountered a sharp bend; create a new phoneme\n self.add_current_phoneme()\n self.prev_dir = dir\n\n # check if we're in a loop (i.e. we see a pixel we've already written to the current phoneme)\n self.since_loop += 1\n if len(self.current_phoneme) > LOOP_LENGTH:\n for index in range(len(self.current_phoneme) - LOOP_LENGTH):\n if (abs(event.x - self.current_phoneme[index][0]) <= LOOP_THRESHOLD \n and abs(event.y - self.current_phoneme[index][1]) <= LOOP_THRESHOLD\n and self.since_loop > LOOP_LENGTH):\n self.since_loop = 0\n self.loop_in_phoneme = True\n \n # splice the current phoneme into two\n if index > LOOP_THRESHOLD:\n self.phoneme_list.append(self.current_phoneme[0:index + 1])\n self.current_phoneme = self.current_phoneme[index:]\n self.add_current_phoneme()\n self.current_phoneme = []\n return\n\n # draw the movement to the canvas and record it in the current phoneme\n self.current_phoneme.append((event.x, event.y))\n self.canvas.create_line(self.x_prev, self.y_prev, event.x, event.y,\n width=LINE_WIDTH, fill='black',\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n self.x_prev = event.x\n self.y_prev = event.y\n\n# start Gregg recognition tool\nif __name__ == '__main__':\n Gregg()\n","repo_name":"pjhale2/gregg-recognition-tool","sub_path":"gregg.py","file_name":"gregg.py","file_ext":"py","file_size_in_byte":11118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4070576906","text":"from django.conf import settings\nfrom django.conf.urls.defaults import *\nfrom django.views.generic.simple import direct_to_template\nfrom mysite.liga.sitemap import TeamSitemap, TableResultsSitemap, PlayerSitemap, StaticSitemap\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom pinax.apps.account.openid_consumer import PinaxConsumer\n\n\nhandler500 = \"pinax.views.server_error\"\n\nsitemaps={\n \"team\":TeamSitemap,\n \"tableresults\":TableResultsSitemap,\n \"player\":PlayerSitemap,\n \"static\":StaticSitemap,\n}\n\nurlpatterns = patterns(\"\",\n #url(r\"^$\", direct_to_template, {\n # \"template\": \"homepage.html\",\n #}, name=\"home\"),\n url(r\"^$\", include(\"liga.urls\")),\n url(r\"^tabela/$\", direct_to_template, {\"template\": \"tabela.html\"}, name=\"tabela\"),\n url(r\"^profil/(?P\\w+)$\", 'mysite.liga.views.manage_profil'),\n url(r\"^tabela/pdf$\", 'mysite.liga.views.create_pdf'),\n url(r\"^druzyny$\", 'mysite.liga.views.manage_team'),\n url(r\"^mecze/(?P\\w+)$\", 'mysite.liga.views.manage_match'),\n url(r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n url(r\"^admin/invite_user/$\", \"pinax.apps.signup_codes.views.admin_invite_user\", name=\"admin_invite_user\"),\n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^about/\", include(\"about.urls\")),\n url(r\"^account/\", include(\"pinax.apps.account.urls\")),\n url(r\"^openid/\", include(PinaxConsumer().urls)),\n)\n\n\nif settings.SERVE_MEDIA:\n urlpatterns += patterns(\"\",\n url(r\"\", include(\"staticfiles.urls\")),\n )\n","repo_name":"winx88/ProjectsPythonDjango","sub_path":"Liga/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23572971991","text":"import sys\nassert sys.version_info >= (3, 5)\nfrom collections import Counter\n\n\ndef solve(prefix):\n N, K = [int(_) for _ in input().split()]\n lv = ((N, 1),)\n k = 1\n while K > k:\n assert sum(t[1] for t in lv) == k\n assert 1 <= len(lv) <= 2\n assert len(lv) == 1 or lv[0][0] == lv[1][0]+1\n K -= k\n k <<= 1\n nlv = Counter()\n for nn, kk in lv:\n nlv[nn>>1] += kk\n nlv[((nn+1)>>1)-1] += kk\n lv = tuple(sorted(nlv.items(), key=lambda t: t[0], reverse=True))\n n = lv[0][0] if K <= lv[0][1] else lv[1][0]\n print('{}{} {}'.format(prefix, n>>1, ((n+1)>>1)-1))\n\n\ndef main():\n T = int(input())\n for t in range(T):\n solve(prefix='Case #{}: '.format(t+1))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/635.py","file_name":"635.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20259516006","text":"# UVa 10130 - SuperSale\n# https://onlinejudge.org/external/101/10130.pdf\n\ndef knapsack(items, max_w):\n\tm = len(items)\n\tn = max_w\n\tdp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n\tfor i in range(1, m + 1):\n\t\titem_value = items[i - 1][0]\n\t\titem_weight = items[i - 1][1]\n\t\tfor j in range(1, n + 1):\n\t\t\tif j - item_weight < 0:\n\t\t\t\tdp[i][j] = dp[i - 1][j]\n\t\t\telse:\n\t\t\t\tdp[i][j] = max(dp[i - 1][j], item_value + dp[i - 1][j - item_weight])\n\treturn dp[m][n]\n\t\n\nif __name__ == \"__main__\":\n#\titems = [(2000, 3), (3000, 4), (1500, 1)]\n#\titems = [(72, 17), (44, 23), (31, 24)]\n#\titems = [(64, 26), (85, 22), (52, 4), (99, 18), (39, 13), (54, 9)]\n#\tprint(knapsack(items, 20))\n\ttc = int(input())\n\tfor _ in range(tc):\n\t\tn = int(input())\n\t\tobjects = []\n\t\tfor _ in range(n):\n\t\t\tobjects.append([int(x) for x in input().split()])\n\t\ttotal_value = 0\n\t\tpeople = int(input())\n\t\tfor _ in range(people):\n\t\t\tmax_weight = int(input())\n\t\t\ttotal_value += knapsack(objects, max_weight)\n\t\tprint(total_value)\n\n","repo_name":"eloyhz/competitive-programming","sub_path":"cpbook/3_problem_solving_paradigms/10130_supersale.py","file_name":"10130_supersale.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"8967127292","text":"\"\"\"resume_builder URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom resume_builderApp import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name='home'),\n path('person_create/', views.person_create, name='person_create'),\n path('projectorjob_create/', views.projectorjob_create, name='projectorjob_create'),\n path('areaofinterest_create/', views.areaofinterest_create, name='areaofinterest_create'),\n path('academicform_create/', views.academicform_create, name='academicform_create'),\n path('educationform_create/', views.educationform_create, name='educationform_create'),\n path('professionalskill_create/', views.professionalskill_create, name='professionalskill_create'),\n path('resume/', views.view, name='view'),\n path('download//', views.resumes, name='download'),\n]\n","repo_name":"rabhi1611/Resume-Builder-Django","sub_path":"resume_builder/resume_builder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25329380533","text":"class Employee:\n name = \"Ben\"\n designation = \"Sales Executive\"\n salesMadeThisWeek = 6\n\n def hasAchivedTarget(self):\n if self.salesMadeThisWeek >= 5:\n print(\"Target has been achived\")\n else:\n print(\"Target has not been achived\")\n\n\nemployeeOne = Employee()\nprint(employeeOne.name)\nemployeeOne.hasAchivedTarget()\n\nemployeeTwo = Employee()\nemployeeTwo.name = \"John\"\nprint(employeeTwo.name)\nemployeeTwo.hasAchivedTarget()","repo_name":"fahadfoysal/Four-piler-of-OOP-in-python","sub_path":"1_Class and object.py","file_name":"1_Class and object.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43279855380","text":"\"\"\"adding changes to upvote and downvote\n\nRevision ID: d0107b64a60f\nRevises: 773c607ae25f\nCreate Date: 2022-05-11 13:16:40.515449\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd0107b64a60f'\ndown_revision = '773c607ae25f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('comments', sa.Column('pitch_id', sa.Integer(), nullable=True))\n op.drop_constraint('comments_pitch_fkey', 'comments', type_='foreignkey')\n op.create_foreign_key(None, 'comments', 'pitches', ['pitch_id'], ['id'])\n op.drop_column('comments', 'pitch')\n op.add_column('downvotes', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'downvotes', 'users', ['user_id'], ['id'])\n op.add_column('upvotes', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'upvotes', 'users', ['user_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'upvotes', type_='foreignkey')\n op.drop_column('upvotes', 'user_id')\n op.drop_constraint(None, 'downvotes', type_='foreignkey')\n op.drop_column('downvotes', 'user_id')\n op.add_column('comments', sa.Column('pitch', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'comments', type_='foreignkey')\n op.create_foreign_key('comments_pitch_fkey', 'comments', 'pitches', ['pitch'], ['id'])\n op.drop_column('comments', 'pitch_id')\n # ### end Alembic commands ###\n","repo_name":"Fridah-kalee/Pitches","sub_path":"migrations/versions/d0107b64a60f_adding_changes_to_upvote_and_downvote.py","file_name":"d0107b64a60f_adding_changes_to_upvote_and_downvote.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27706353368","text":"from django.urls import path\nfrom .views import AdvertisementView, AdvertisementManyView\n\n\napp_name = \"advertisements\"\n\n# app_name will help us do a reverse look-up latter.\nurlpatterns = [\n path('advertisements/', AdvertisementManyView.as_view()),\n path('advertisement/', AdvertisementView.as_view()),\n path('advertisement', AdvertisementView.as_view()),\n]","repo_name":"andru196/miniAd","sub_path":"siteApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5182313556","text":"'''\n\nsSFR distribution of galaxy catalogs\n\n'''\n\nimport warnings\nimport os\nimport numpy as np\n\n# -- Local --\n#from cenque import CenQue\nfrom util.cenque_utility import get_q_ssfr_mean\nfrom sfms.fitting import get_param_sfr_mstar_z\nfrom group_catalog.group_catalog import central_catalog\n\n\n# distance measurement between SSFR distribution data and model\ndef rho_ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'instant'}, \n mass_evol = 'sham', \n Mrcut=18, \n **kwargs\n ): \n \"\"\" Compare sSFR distribution of evolved CenQue and\n SDSS Group Catalog in the green valley\n \"\"\"\n\n if Mrcut == 18: \n z_med = 0.03\n elif Mrcut == 19: \n z_med = 0.05\n elif Mrcut == 20: \n z_med = 0.08\n\n evol_cq_ssfr_bin, evol_cq_ssfr_hist = ssfr_cq_evol(\n start_nsnap = start_nsnap, \n final_nsnap = final_nsnap, \n sf_prop = sf_prop, \n fq_prop = fq_prop, \n tau_prop = tau_prop, \n mass_evol = mass_evol, \n **kwargs\n )\n\n group_ssfr = Ssfr()\n group_ssfr_bin, group_ssfr_hist = group_ssfr.groupcat(Mrcut=Mrcut)\n \n l2_ssfr = 0.0\n for i_massbin, massbin in enumerate(group_ssfr.mass_bins): \n\n if not np.array_equal(evol_cq_ssfr_bin[i_massbin], group_ssfr_bin[i_massbin]):\n raise ValueError()\n\n # sSFR comparison range\n\n q_ssfr_massbin = np.mean(get_q_ssfr_mean(massbin)) \n\n sfr_mstar_z, sig_sfr_mstar_z = get_param_sfr_mstar_z()\n\n sf_ssfr_massbin = sfr_mstar_z(massbin[1], z_med) - massbin[1]\n\n green_range = np.where(\n (evol_cq_ssfr_bin[i_massbin] > q_ssfr_massbin) &\n (evol_cq_ssfr_bin[i_massbin] < sf_ssfr_massbin)\n )\n\n #print np.sum((evol_cq_ssfr_hist[i_massbin][green_range] - group_ssfr_hist[i_massbin][green_range])**2)\n\n l2_ssfr += np.sum((evol_cq_ssfr_hist[i_massbin][green_range] - group_ssfr_hist[i_massbin][green_range])**2)\n\n return l2_ssfr\n\nif __name__ == \"__main__\":\n pass\n\n\"\"\"\n print rho_ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'instant'}, \n mass_evol = 'sham', \n Mrcut=18\n ) \n\n print rho_ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'satellite'}, \n mass_evol = 'sham', \n Mrcut=18\n ) \n\n print rho_ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'line', 'fid_mass': 10.75, 'slope': -0.57, 'yint': 0.5}, \n mass_evol = 'sham', \n Mrcut=18\n ) \n \n print rho_ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'line', 'fid_mass': 10.75, 'slope': -0.6, 'yint': 0.6}, \n mass_evol = 'sham', \n Mrcut=18\n ) \n\ndef ssfr_cq_evol(\n start_nsnap = 13, \n final_nsnap = 1, \n sf_prop = {'name': 'average'}, \n fq_prop = {'name': 'wetzelsmooth'}, \n tau_prop = {'name': 'instant'}, \n mass_evol = 'sham', \n **kwargs\n ): \n ''' Calculate sSFR distribution of evolved CenQue \n '''\n\n evolved_cq = CenQue(n_snap = final_nsnap, cenque_type = 'evol_from'+str(start_nsnap))\n evolved_cq.sf_prop = sf_prop\n evolved_cq.fq_prop = fq_prop\n evolved_cq.tau_prop = tau_prop\n evolved_cq.mass_evol = mass_evol\n \n if not os.path.isfile(evolved_cq.file()):\n start_cq = CenQue(n_snap = start_nsnap, cenque_type = 'sf_assigned')\n 
start_cq.readin()\n\n evolved_cq = evolve_cq(\n start_cq, \n final_nsnap = final_nsnap, \n sf_prop = sf_prop, \n fq_prop = fq_prop, \n tau_prop = tau_prop, \n mass_evol = mass_evol, \n **kwargs\n )\n else: \n evolved_cq.readin()\n\n evolved_cq_ssfr = Ssfr()\n\n return evolved_cq_ssfr.cenque(evolved_cq)\n\n\"\"\"\n","repo_name":"changhoonhahn/central_quenching","sub_path":"CenQue/archive/ssfr.py","file_name":"ssfr.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74954443393","text":"\nimport requests\nfrom tkinter import *\n\nimport pandas as pd \ntabela=pd.read_csv(r\"C:\\Users\\matheus_becker\\Desktop\\telecom_users.csv\")\n\ntabela = tabela.drop(\"Unnamed: 0\", axis=1)\ntabela[\"TotalGasto\"] = pd.to_numeric(tabela[\"TotalGasto\"], errors=\"coerce\")\n#tabela = tabela.drop(\"Unnamed: 0\", axis=0)\n\njanela = Tk()\ntexto=Label(janela, text=tabela)\ntexto.grid(column=0,row=0)\njanela.title(\"Atividade\")\njanela.mainloop()\n","repo_name":"BeckerMM/first_Test_python","sub_path":"atividade.py","file_name":"atividade.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42842001518","text":"from rest_framework import filters, mixins, viewsets\nfrom rest_framework.parsers import MultiPartParser\n\nfrom src.api.images.models import UploadedImages\nfrom src.api.images.serializers import ImageSerializer\n\n\nclass ImageViewSet(\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet,\n):\n serializer_class = ImageSerializer\n queryset = UploadedImages.objects.all()\n\n lookup_field = \"id\"\n\n parser_classes = (MultiPartParser,)\n\n filter_backends = [\n filters.SearchFilter,\n ]\n search_fields = [\n \"title\",\n ]\n","repo_name":"chilledsnake/img_api","sub_path":"src/api/images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8128223948","text":"#!/usr/bin/env python\n\nimport sys\nimport random\n\ndef write_header(f, name, type, height, width):\n f.write('RT_LOCAL_DATA %s %s[%d*%d];\\n' % (type, name, height, width))\n\ndef write_arr(f, name, arr, type, height, width):\n f.write('RT_L2_DATA %s %s[%d*%d] = {\\n' % (type, name, height, width))\n for i in range(0, height):\n for j in range(0, width):\n v = arr[i * height + j]\n f.write('%d, ' % (v))\n f.write('\\n')\n f.write('};\\n\\n')\n return\n\n################################################################################\n\ndef gen_stimuli(name, type, min, max, size):\n f = open(name, 'w')\n \n NR_ADD = 1\n HEIGHT = size\n WIDTH = size\n MAX_SIZE = 16*16\n \n m_a = []\n m_b = []\n m_exp = []\n \n for i in range(0,HEIGHT):\n for j in range(0,WIDTH):\n a = random.randint(min, max)\n b = random.randint(min, max)\n \n r = (a + b)\n \n m_a.append(a)\n m_b.append(b)\n m_exp.append(r)\n \n f.write('typedef %s mattype;\\n' % type)\n f.write('int matrixsizes = 16;\\n')\n f.write('#define NR_ADD %d\\n\\n' % NR_ADD)\n\n write_header(f, 'm_a', type, HEIGHT, WIDTH)\n write_header(f, 'm_b', type, HEIGHT, WIDTH)\n write_header(f, 'm_c', type, 1, MAX_SIZE)\n\n write_arr(f, 'm_a_l2', m_a, type, HEIGHT, WIDTH)\n write_arr(f, 'm_b_l2', m_b, type, HEIGHT, WIDTH)\n write_arr(f, 'm_exp_l2', m_exp, type, HEIGHT, WIDTH)\n \n f.write('#define WIDTH %d\\n' % HEIGHT)\n f.write('#define HEIGHT %d\\n'% WIDTH)\n\ngen_stimuli('matrixAdd32.h', \"int32_t\", -2**30, 2**30-1,16)\ngen_stimuli('matrixAdd16.h', \"int16_t\", -2**14, 2**14-1,16)\ngen_stimuli('matrixAdd8.h', \"int8_t\", -2**6, 2**6-1,16)\n","repo_name":"pulp-platform/pulp-training","sub_path":"matrixAdd/gen_stimuli.py","file_name":"gen_stimuli.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"71490355393","text":"def readPerline():\r\n\tfile=open(\"filename.txt\",'r')\r\n\twhile True:\r\n\t\tcontent=file.readline() # read per line\r\n\t\tprint(content)\r\n\t\tcontent=content.strip().split()\r\n\t\tname=content[0]\r\n\t\t# if content==\"\":\r\n\t\tprint(name)\r\n\t\tbreak\r\n\r\ndef readLines():\r\n\tfile=open(\"filename.txt\",'r')\r\n\tcontent=file.readlines() # read lines once\r\n\tprint(content)\r\n\t'''\r\n\twhile True:\r\n\t\tcontent=content.strip().split()\r\n\t\tname=content[0]\r\n\t\t# if content==\"\":\r\n\t\tprint(name)\r\n\t\tbreak\r\n\t'''\r\n# readPerline()\r\n# readLines()\r\nf=\"filename.txt\"\r\nf.open('r')\r\nrel=f.readlines()\r\nf.close()\r\nprint(rel)\r\n","repo_name":"Devinwon/master","sub_path":"fileOperation/FileReadWrite.py","file_name":"FileReadWrite.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5855025940","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport warnings\nimport tempfile\nimport shutil\nfrom subprocess import check_call\nfrom tarfile import TarFile\nfrom pkgutil import get_data\nfrom io import BytesIO\nfrom contextlib import closing\n\nfrom dateutil.tz import tzfile\n\n__all__ = [\"gettz\", \"rebuild\"]\n\n_ZONEFILENAME = \"dateutil-zoneinfo.tar.gz\"\n\n# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but\n# it's close enough for python2.6\n_tar_open = TarFile.open\nif not hasattr(TarFile, '__exit__'):\n def _tar_open(*args, **kwargs):\n return closing(TarFile.open(*args, **kwargs))\n\n\nclass tzfile(tzfile):\n def __reduce__(self):\n return (gettz, (self._filename,))\n\n\ndef getzoneinfofile_stream():\n try:\n return BytesIO(get_data(__name__, _ZONEFILENAME))\n except IOError as e: # TODO switch to FileNotFoundError?\n warnings.warn(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n return None\n\n\nclass ZoneInfoFile(object):\n def __init__(self, zonefile_stream=None):\n if zonefile_stream is not None:\n with _tar_open(fileobj=zonefile_stream, mode='r') as tf:\n # dict comprehension does not work on python2.6\n # TODO: get back to the nicer syntax when we ditch python2.6\n # self.zones = {zf.name: tzfile(tf.extractfile(zf),\n # filename = zf.name)\n # for zf in tf.getmembers() if zf.isfile()}\n self.zones = dict((zf.name, tzfile(tf.extractfile(zf),\n filename=zf.name))\n for zf in tf.getmembers() if zf.isfile())\n # deal with links: They'll point to their parent object. Less\n # waste of memory\n # links = {zl.name: self.zones[zl.linkname]\n # for zl in tf.getmembers() if zl.islnk() or zl.issym()}\n links = dict((zl.name, self.zones[zl.linkname])\n for zl in tf.getmembers() if\n zl.islnk() or zl.issym())\n self.zones.update(links)\n else:\n self.zones = dict()\n\n\n# The current API has gettz as a module function, although in fact it taps into\n# a stateful class. So as a workaround for now, without changing the API, we\n# will create a new \"global\" class instance the first time a user requests a\n# timezone. Ugly, but adheres to the api.\n#\n# TODO: deprecate this.\n_CLASS_ZONE_INSTANCE = list()\n\n\ndef gettz(name):\n if len(_CLASS_ZONE_INSTANCE) == 0:\n _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))\n return _CLASS_ZONE_INSTANCE[0].zones.get(name)\n\n\ndef rebuild(filename, tag=None, format=\"gz\", zonegroups=[]):\n \"\"\"Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*\n\n filename is the timezone tarball from ftp.iana.org/tz.\n\n \"\"\"\n tmpdir = tempfile.mkdtemp()\n zonedir = os.path.join(tmpdir, \"zoneinfo\")\n moduledir = os.path.dirname(__file__)\n try:\n with _tar_open(filename) as tf:\n for name in zonegroups:\n tf.extract(name, tmpdir)\n filepaths = [os.path.join(tmpdir, n) for n in zonegroups]\n try:\n check_call([\"zic\", \"-d\", zonedir] + filepaths)\n except OSError as e:\n if e.errno == 2:\n logging.error(\n \"Could not find zic. 
Perhaps you need to install \"\n \"libc-bin or some other package that provides it, \"\n \"or it's not in your PATH?\")\n raise\n target = os.path.join(moduledir, _ZONEFILENAME)\n with _tar_open(target, \"w:%s\" % format) as tf:\n for entry in os.listdir(zonedir):\n entrypath = os.path.join(zonedir, entry)\n tf.add(entrypath, entry)\n finally:\n shutil.rmtree(tmpdir)\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/dateutil/zoneinfo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"}
+{"seq_id":"74822392193","text":"\nimport json\nimport platform\nfrom time import sleep\n# os.system()\nimport os\nimport sys\n\nfrom build_sys.data_str import usage_text\nfrom build_sys.tools import test, sanitize, debug, benchmark, run, clean\nfrom build_sys.build import build\nfrom build_sys.change_name import change_proj_name\nfrom build_sys.bootstrap import bootstrap\nfrom build_sys.prod import prod\nfrom build_sys.docker import docker_build\nname=\"\"\ndef main():\n\n usage = usage_text()\n cof = open('.config/data.json')\n data = json.loads(cof.read())\n if len(sys.argv) == 1:\n print(usage)\n else:\n if str(sys.argv[1]) == \"build\":\n build(data)\n elif str(sys.argv[1]) == \"run\":\n run(data)\n elif str(sys.argv[1]) == \"bootstrap\":\n bootstrap(data)\n elif str(sys.argv[1]) == \"dev\":\n build(data)\n run(data)\n elif str(sys.argv[1]) == \"clean\":\n clean(data)\n elif str(sys.argv[1]) == \"rename\" and len(sys.argv) == 3:\n change_proj_name(data)\n\n elif str(sys.argv[1]) == \"debug\":\n debug(data)\n elif str(sys.argv[1]) == \"test\":\n test(data)\n elif str(sys.argv[1]) == \"sanitize\":\n sanitize(data)\n elif str(sys.argv[1]) == \"benchmark\":\n benchmark(data)\n elif str(sys.argv[1]) == \"prod\":\n prod(data)\n elif str(sys.argv[1]) == \"docker\":\n if len(sys.argv) == 2:\n print(\"docker\")\n elif len(sys.argv) == 3 and sys.argv[2] ==\"build\":\n docker_build(data)\n \n else:\n print(\"invalid arguments\")\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"danikhan632/create-cpp-app","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18313809416","text":"# This program - \n# 1. Sends a post request to access dynamic token\n# 2. Use the dynamic (bearer) token to post data \n# Note: my backend server expects a bearer token for auth and a base64 encoded data for processing.\n# 3. Print the reponse received from the post request \n# Note: upon successful authentication, my backend server returns another base64 encoded data)\n\n\nfrom os import system\nimport requests\nimport json\n\nbase64EncodedInput = 'SUkqALItAAD / O6ImgZU7sDwzM6wq//pimOOKQkThU07WGF7TT7wqmHWGAAAFABULgAAGwEFAAEAAA3wwe/iHYnE7W7HFIRexUUZhFYpEDAAEAAAACAAAAAAAAAAIAA='\n\n\n# construct header and payload with clientid, client_secret.\ndef get_headers_for_token_request():\n token_url = 'https://api-internal.pod.myplaygrounddomain.net/apip/auth/v2/token'\n token_header = {'Accept': 'application/json','Content-Type': 'application/json',}\n token_payload = {'client_id': 'oo1nclud3ooyourooclientidoohereoo','client_secret': 'oo1nclud3ooyouroocl13ntooS3cr3toohereoo','grant_type': 'client_credentials',}\n return token_url, token_header, token_payload\n\n\n# construct header and payload with input data for post request\ndef get_headers_for_post(token):\n post_url = \"https://api-internal.pod.myplaygrounddomain.net/v1/p2e/api/invocations\"\n post_headers = {'Accept': 'application/json','Content-type': 'application/json','Authorization': 'Bearer ' + token,}\n post_payload ={'name' : 'what-ever-applicable', 'role' : 'creator', 'public_key' : ''+ base64EncodedInput,}##pay\n return post_url, post_headers, post_payload\n\n\n# a post method - returns a response (token_url is designed to return a bearer token in my environment)\ndef get_token(token_Url, token_header, token_payload):\n token_response = requests.post(token_Url, headers=token_header, json=token_payload)\n return token_response\n\n# another post method - returns a resonse (post_url is designed to return an encoded data)\ndef post_and_get_response(post_Url, post_headers, post_payload ):\n response = requests.post(post_Url, headers=post_headers, json=post_payload)\n return response\n\n\n\nif __name__ == \"__main__\":\n system('cls')\n\n token_url, token_header, token_payload = get_headers_for_token_request()\n resp_token = get_token(token_url, token_header, token_payload).json()\n #print (resp_token)\n token = resp_token[\"access_token\"]##ignore all content except the access_token\n #print (token)\n\n post_Url, post_headers, post_payload = get_headers_for_post(token)\n resp = post_and_get_response(post_Url, post_headers, post_payload).json()\n print (resp)\n","repo_name":"0x218/Python","sub_path":"http/post_with_dynamic_token.py","file_name":"post_with_dynamic_token.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19216358153","text":"\"\"\"An bruteforce agent that repeatedly cycles through all available actions in\norder.\n\nTo run 'tiny' benchmark scenario with default settings, run the following from\nthe nasim/agents dir:\n\n$ python bruteforce_agent.py tiny\n\nThis will run the agent and display progress and final results to stdout.\n\nTo see available running arguments:\n\n$ python bruteforce_agent.py --help\n\"\"\"\n\nfrom itertools import product\n\nimport nasim\n\nLINE_BREAK = \"-\"*60\n\n\ndef run_bruteforce_agent(env, step_limit=1e6, verbose=True):\n \"\"\"Run bruteforce agent on nasim environment.\n\n Parameters\n ----------\n env : nasim.NASimEnv\n the nasim environment to run agent on\n step_limit : int, optional\n the maximum number of steps to run agent for (default=1e6)\n verbose : bool, optional\n whether to print out progress messages or not (default=True)\n\n Returns\n -------\n int\n timesteps agent ran for\n float\n the total reward recieved by agent\n bool\n whether the goal was reached or not\n \"\"\"\n if verbose:\n print(LINE_BREAK)\n print(\"STARTING EPISODE\")\n print(LINE_BREAK)\n print(\"t: Reward\")\n\n env.reset()\n total_reward = 0\n done = False\n env_step_limit_reached = False\n steps = 0\n cycle_complete = False\n\n if env.flat_actions:\n act = 0\n else:\n act_iter = product(*[range(n) for n in env.action_space.nvec])\n\n while not done and not env_step_limit_reached and steps < step_limit:\n if env.flat_actions:\n act = (act + 1) % env.action_space.n\n cycle_complete = (steps > 0 and act == 0)\n else:\n try:\n act = next(act_iter)\n cycle_complete = False\n except StopIteration:\n act_iter = product(*[range(n) for n in env.action_space.nvec])\n act = next(act_iter)\n cycle_complete = True\n\n _, rew, done, env_step_limit_reached, _ = env.step(act)\n total_reward += rew\n\n if cycle_complete and verbose:\n print(f\"{steps}: {total_reward}\")\n steps += 1\n\n if done and verbose:\n print(LINE_BREAK)\n print(\"EPISODE FINISHED\")\n print(LINE_BREAK)\n print(f\"Goal reached = {env.goal_reached()}\")\n print(f\"Total steps = {steps}\")\n print(f\"Total reward = {total_reward}\")\n elif verbose:\n print(LINE_BREAK)\n print(\"STEP LIMIT REACHED\")\n print(LINE_BREAK)\n\n if done:\n done = env.goal_reached()\n\n return steps, total_reward, done\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"env_name\", type=str, help=\"benchmark scenario name\")\n parser.add_argument(\"-s\", \"--seed\", type=int, default=0,\n help=\"random seed\")\n parser.add_argument(\"-o\", \"--partially_obs\", action=\"store_true\",\n help=\"Partially Observable Mode\")\n parser.add_argument(\"-p\", \"--param_actions\", action=\"store_true\",\n help=\"Use Parameterised action space\")\n parser.add_argument(\"-f\", \"--box_obs\", action=\"store_true\",\n help=\"Use 2D observation space\")\n args = parser.parse_args()\n\n nasimenv = nasim.make_benchmark(\n args.env_name,\n args.seed,\n not args.partially_obs,\n not args.param_actions,\n not args.box_obs\n )\n if not args.param_actions:\n print(nasimenv.action_space.n)\n else:\n print(nasimenv.action_space.nvec)\n run_bruteforce_agent(nasimenv)\n","repo_name":"Jjschwartz/NetworkAttackSimulator","sub_path":"nasim/agents/bruteforce_agent.py","file_name":"bruteforce_agent.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"}
+{"seq_id":"41191915103","text":"import FWCore.ParameterSet.Config as cms\n\nhltSingleVertexPixelTrackFilter = cms.EDFilter('HLTSingleVertexPixelTrackFilter',\n saveTags = cms.bool(True),\n vertexCollection = cms.InputTag('hltPixelVerticesForMinBias'),\n trackCollection = cms.InputTag('hltPixelCands'),\n MinPt = cms.double(0.2),\n MaxPt = cms.double(10000),\n MaxEta = cms.double(1),\n MaxVz = cms.double(10),\n MinTrks = cms.int32(30),\n MinSep = cms.double(0.12),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"HLTrigger/special/hltSingleVertexPixelTrackFilter_cfi.py","file_name":"hltSingleVertexPixelTrackFilter_cfi.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"32350412405","text":"\"\"\" Module basemodel.py (By: Charley Zhang, July 2020)\nImplements basic functionality for all models subclasses.\n\"\"\"\n\nimport sys, os\nimport torch, torch.nn as nn\nimport torchsummary\n\n\nclass BaseModel(nn.Module):\n r\"\"\" Pytorch basemodel with useful customized functionalities.\"\"\"\n def __init__(self, *args, **kwargs):\n super(BaseModel, self).__init__(*args, **kwargs)\n\n def forward(self, *args):\n raise NotImplementedError(f\"forward() func requires definition.\")\n\n @property\n def device(self):\n return next(self.parameters()).device if self.parameters() else None\n\n @property\n def param_counts(self):\n tot_params = sum(p.numel() for p in self.parameters())\n tot_train_params = sum(p.numel() for p in self.parameters()\n if p.requires_grad\n )\n return tot_params, tot_train_params\n\n @property\n def size(self):\n r\"\"\" Gets total parameter and buffer memory usage in bytes. \"\"\"\n params_mem = sum(\n [p.nelement() * p.element_size() for p in self.parameters()]\n )\n bufs_mem = sum(\n [buf.nelement() * buf.element_size() for buf in self.buffers()]\n )\n return params_mem + bufs_mem \n\n def summary(self, input_size=(3,256,256), batch_size=-1, device='cpu'):\n if 'cuda' in device:\n device = 'cuda' # summary does not support targ device assignment\n torchsummary.summary(\n self, \n input_size=input_size, \n batch_size=batch_size,\n device=device\n )\n\n \n ","repo_name":"charzharr/3D-medseg-pretraining","sub_path":"src/lib/nets/basemodel.py","file_name":"basemodel.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26464547020","text":"emails = [\"test.email+alex@leetcode.com\",\"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]\nf=[]\nfor i in emails :\n l=i.split(\"@\")\n n=l[0].split(\"+\")\n s=n[0].replace(\".\",\"\")\n f.append((s+\"+\"+l[1]))\nprint(len(set(f)))\n \n \n","repo_name":"Kshitij269/Questions","sub_path":"929. Unique Email Addresses.py","file_name":"929. Unique Email Addresses.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9002629903","text":"from django.urls import path\nfrom . import views\n\napp_name = 'movies'\n\nurlpatterns = [\n path('', views.movie_list, name='movie_list'),\n path('/', views.detail, name=\"detail\"),\n path('/score/new', views.create_score, name='create_score'),\n path('/score//delete', views.delete_score, name=\"delete\"),\n path('/score//update', views.update_score, name=\"update\"),\n path('dbmake/', views.movie_db),\n path('dbmake2/', views.movie_db2),\n]","repo_name":"Hansung-Lee/SSAFY","sub_path":"ssafy_project/last_project/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"22558309372","text":"import cs50\nimport sys\n\nif len(sys.argv) is not 2:\n print('Please provide the key')\n exit(1)\nelse:\n key = int(sys.argv[1])\n print(\"plaintext: \", end=\"\")\n ptext = cs50.get_string();\n ctext = \"\"\n for char in ptext:\n if char.isalpha():\n if char.isupper():\n x = (ord(char) + key - 65) % 26 + 65\n elif char.islower():\n x = (ord(char) + key - ord('a')) % 26 + ord('a')\n c = chr(x)\n ctext += c\n else:\n ctext += char\n print(\"ciphertext:\",ctext) \n exit(0)","repo_name":"wdlsvnit/SMP-2017-Web","sub_path":"smp2017-web-Darshit/CS50_Solutions/pset6/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27422552922","text":"from requests_oauthlib import OAuth2Session\n\nfrom flask import Flask, request, redirect, session, url_for\nfrom flask.json import jsonify\n\n# This information is obtained upon registration of a new GitHub\nclient_id = \"0oaps3etzD5o6oIBE5d6\"\nclient_secret = \"TB7kYfe86zZl294mhE8UXIO4ofV5gqEkvZBs3Net\"\nauthorization_base_url = 'https://dev-9590480.okta.com/oauth2/v1/authorize'\ntoken_url = 'https://dev-9590480.okta.com/oauth2/v1/token'\n\napp = Flask(__name__)\ng_state = \"\"\n\n@app.route(\"/login\")\ndef login():\n keycloak = OAuth2Session(client_id)\n authorization_url, state = keycloak.authorization_url(authorization_base_url)\n print(f\"authorization_url: {authorization_url}\")\n print(f\"STATE: {state}\")\n \n # State is used to prevent CSRF, keep this for later.\n session['oauth_state'] = state\n g_state = state\n return redirect(authorization_url)\n\n@app.route(\"/callback\")\ndef callback():\n # keycloak = OAuth2Session(client_id, state=session['oauth_state'])\n keycloak = OAuth2Session(client_id)\n token = keycloak.fetch_token(token_url, client_secret=client_secret,\n authorization_response=request.url)\n\n print(f\"TOKEN: {token}\")\n res = jsonify(keycloak.get('https://dev-9590480.okta.com/oauth2/v1/userinfo').json())\n print(f\"USER : {res}\")\n return res\n\n@app.route(\"/test\")\ndef test():\n return \"test result\"\n\nif __name__ == '__main__':\n app.run(port=5000,debug=True)\n","repo_name":"shawnhankim/cori-nginx","sub_path":"auth/oauth2.0/flask/01-flask-example.py","file_name":"01-flask-example.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15067869809","text":"# # FOR: ciclu repetitiv in care stim cati pasi vom efectua\n# # for index in range(n):\n# # fa ceva cu index\n#\n# # range = functie care ne da un numar de pasi: 0, 1, 2, 3, .... n\n# # range(n) - n va fi parametrul de stop\n# # range(start, stop) - start, start+1, start+2, .... stop\n# # range(start, stop, pas) - start, start+pas, start+2*pas, ... stop\n# for idx in range(10):\n# print(idx)\n#\n# print(\"*\" * 80)\n# # Exemplu: afisare elemente lista unul cate unul\n# # Putem folosi numerele oferite de functia range ca si indici pentru a accesa elementele din lista noastra\n# lst = ['luni', 'marti', 'miercuri', 'joi', 'vineri']\n# for idx in range(len(lst)):\n# print(lst[idx])\n#\n# print(\"*\" * 80)\n# for idx in range(1, 10, 2):\n# print(idx)\n#\n# print(\"*\" * 80)\n# # Iterare peste colectii Python\n# # for element in collection:\n# # fa ceva cu element\n# for zi in lst:\n# print(zi)\n\n# Exemplu: gasirea unui numar intr-o lista data\nmy_list = [34, 12, 7, 5, 99, 100]\nnumber_to_find = 77\nnumber_found = False\n\nfor number in my_list:\n print(f\"Testam valoarea {number}\")\n if number == number_to_find:\n print(\"Am gasit numarul cautat in lista!\")\n number_found = True\n break\n\nif not number_found:\n print(f\"{number_to_find} nu se gaseste in lista!\")\n\nprint(\"Am terminat for-ul!\")\n\n# Ce am scris mai sus se poate scrie asa\nfor number in my_list:\n if number == number_to_find:\n print(\"Am gasit!\")\n break\nelse:\n # Ramura else de la for se va executa daca in timpul for-ului nu am ajuns deloc la o instructiune BREAK\n print(\"Nu am gasit numarul cautat!\")\n\n\nprint(\"=\"*80)\n# Continue - sari peste codul ce urmeaza la iteratia urmatoare\n# Exemplu: afisare numere impare prin \"sarirea\" peste cele pare\nfor i in range(10):\n print(f\"Verificam {i}\")\n if i % 2 == 0:\n # Daca am un numar par, vreau sa sar la urmatorul numar\n print(f\"{i} este par, SARIM peste el\")\n continue\n print(f\"{i} este numar impar\")\n\n# Unirea a 2 liste, iterand peste una dintre ele si folosind append\nl1 = [4, 3, 54, 6, 12, 0]\nl2 = [43, 5, 9, 99, 100, 2, 3]\n# l1.extend(l2)\nfor element in l2:\n l1.append(element)\n","repo_name":"Alx152/curs_TA18_ITF_Adela","sub_path":"curs4_cicluri_repetitive/for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74482868993","text":"import re\nimport datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport logging\nfrom typing import Tuple, List\nimport os\nfrom Producto import Producto\n\nHEADERS = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/107.0.0.0 Safari/537.36 '\n }\n\nexecution_datetime = datetime.datetime.now()\n\n\ndef get_xml_page(url: str) -> BeautifulSoup:\n \"\"\"\n :param url:\n :return: Devuelve un objeto BeautifulSoup para operar con la pagina cargada\n \"\"\"\n\n session = requests.Session()\n page = session.get(url, headers=HEADERS)\n soup = BeautifulSoup(page.content, features='xml')\n\n return soup\n\n\ndef get_html_page(url: str) -> BeautifulSoup:\n \"\"\"\n :param url:\n :return: Devuelve un objeto BeautifulSoup para operar con la pagina cargada\n \"\"\"\n\n session = requests.Session()\n # Se simula navegacion humana, con retraso de 10x el tiempo del request.\n t0 = time.time()\n page = session.get(url, headers=HEADERS)\n delay = time.time() - t0\n time.sleep(0.2 * delay)\n soup = BeautifulSoup(page.content, features='html.parser')\n\n return soup\n\n\ndef get_info_from_url(url: str) -> Producto:\n \"\"\"\n param url: url address to scrap\n return: dic with scrapped information.\n raise ProductoIncorrectoException: when coudn't fetch any product information\n \"\"\"\n\n page = get_html_page(url)\n\n producto = Producto()\n\n producto.product_id = str(url.split('/')[-1])\n producto.price = __obtain_price(page)\n producto.product, producto.brand = __obtain_name(page)\n producto.unit_price, producto.units = __obtain_price_per_unit(page)\n producto.categories = __obtain_categories(page)\n producto.discount = __obtain_discount(page)\n producto.date = execution_datetime.date()\n\n # comprobamos si hay informacion missing.\n if any([producto.price is None, producto.product is None, producto.brand is None,\n producto.unit_price is None, producto.units is None]):\n logging.warning(f\"{url} failed. 
Missing information.\")\n raise ProductoIncorrectoException(f\"Producto_id: {producto.product_id}\")\n\n return producto\n\n\ndef __obtain_name(page: BeautifulSoup) -> Tuple[str, str]:\n fetched_product = page.find_all(\"h1\", class_=\"product-title\")\n try:\n product_name = [process_name(product.text) for product in fetched_product][0]\n brand = [__process_brand(product.text) for product in fetched_product][0]\n except (IndexError, AttributeError):\n logging.warning('Product name not found')\n product_name = None\n brand = None\n return product_name, brand\n\n\ndef __obtain_price(page: BeautifulSoup) -> float:\n try:\n fetched_price = page.find_all(\"p\", class_=\"buy-box__active-price\")\n price = float([__process_price(price.text) for price in fetched_price][0])\n except (IndexError, AttributeError):\n logging.warning('Product price not found')\n price = None\n return price\n\n\ndef __obtain_categories(page: BeautifulSoup) -> List[str]:\n fetched_categories = page.find_all(\"span\", class_=\"breadcrumb-item__link\")\n try:\n categories = [__preprocess_str(category.text) for category in fetched_categories]\n except AttributeError:\n categories = None\n return categories\n\n\ndef __obtain_price_per_unit(page: BeautifulSoup) -> Tuple[float, str]:\n fetched_unit_prices = page.find_all(\"p\", \"buy-box__price-per-unit\")\n try:\n price = float([__process_price(unit_price.text) for unit_price in fetched_unit_prices][0])\n units = [__process_unit_price(unit_price.text) for unit_price in fetched_unit_prices][0]\n except (IndexError, AttributeError):\n logging.warning('Unit price not found')\n price = None\n units = None\n return price, units\n\n\ndef __obtain_discount(page: BeautifulSoup) -> str:\n try:\n fetched_discount = page.find_all(\"span\", \"product_details_promotion_description\")\n discount_percentage = [__process_discount(discount.text) for discount in fetched_discount][0]\n except (IndexError, AttributeError):\n discount_percentage = None\n return discount_percentage\n\n\ndef create_data_folder():\n today = str(datetime.date.today()).replace('-', '')\n data_path = os.path.join(os.getcwd(), '../..', 'dataset', today)\n os.makedirs(os.path.join(data_path, 'tmp'), exist_ok=True)\n return data_path\n\n\ndef __preprocess_str(text: str) -> str:\n rm_chars = [\"\\r\", \"\\n\", \"\\t\"]\n for char in rm_chars:\n text = text.replace(char, \"\")\n return text.replace(\",\", \".\").strip()\n\n\ndef __process_unit_price(text: str) -> str:\n match = re.search('€.+$', text).group().strip()\n return match\n\n\ndef __process_price(text: str) -> str:\n match = re.search('\\\\d+,\\\\d+', text).group().strip()\n return match.replace(\",\", \".\")\n\n\ndef __process_discount(text: str) -> str:\n match = re.search('\\\\b\\\\d+%', text).group().strip()\n return match.replace(\",\", \".\")\n\n\ndef __process_brand(text: str) -> str:\n text = __preprocess_str(text)\n match = re.findall('[A-Z]\\\\w+', text)\n\n return match[1]\n\n\ndef process_name(text: str) -> str:\n text = __preprocess_str(text)\n # match = re.findall('[A-Z][a-z áéíóú]+', text)\n match = re.findall('.*', text)\n\n return match[0]\n\n\ndef __print_page(page: BeautifulSoup, ruta: str):\n \"\"\"\n imprime la pagina escrapeada en la ruta correspondiente.\n \"\"\"\n with open(ruta, \"w\", encoding=\"utf-8\") as f:\n f.write(page.prettify())\n\n\nclass ProductoIncorrectoException(Exception):\n 
...\n","repo_name":"cperezh/FoodECommerceScraper","sub_path":"source/Producto_DIA_DP/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16167912964","text":"name = input(\"Enter your name: \")\r\ncorrect = 0\r\nwrong = 0\r\nj = 0\r\nguess = \"\"\r\nprint(\"Welcome to guessing game \")\r\nprint(\"Rules: \\n 1.You have guess movie name for each correct guess +1 will be awarded \\n 2.If guessed 2 times continously games exits \\n 3.For each wrong guess -1 will be awarded\")\r\nprint(\"Your game begins now: \\n\\n\\n\")\r\nfor i in range(0,10):\r\n if(j < 2):\r\n if(i == 0):\r\n print(\"__e _a__ __i___\")\r\n x = input(\"Enter yoyr answer: \")\r\n if(x == \"The dark knight\" or x == \"the dark knight\"):\r\n correct = correct + 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else:\r\n wrong = wrong + 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 1):\r\n print(\"_i_i_e_ _a_e\")\r\n x = input(\"Enter you answer: \")\r\n if(x == \"citizen kane\" or x == \"Citizen kane\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else:\r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 2):\r\n print(\"_u__ _i__io_\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"Pulp fiction\" or x == \"pulp fiction\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 3):\r\n print(\"_a_\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"War\" or x == \"war\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 4):\r\n print(\"_e__i_o\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"Vertigo\" or x == \"vertigo\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 5):\r\n print(\"_i__ _i____\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"City lights\" or x == \"city lights\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 6):\r\n print(\"__e__ai e___e__\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"Chennai express\" or x == \"chennai express\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 7):\r\n print(\"_i__a_e\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"Dilwale\" or x == \"dilwale\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n elif(i == 8):\r\n print(\"_ i_io__\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"3 idiots\" or x == \"3 Idiots\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n else:\r\n print(\"_a__a_\")\r\n x = input(\"Enter your answer: \")\r\n if(x == \"Dangal\" or x == \"dangal\"):\r\n correct += 1\r\n print(\"Correct answer\")\r\n j = 0\r\n else: \r\n wrong += 1\r\n j+=1\r\n print(\"wrong answer\")\r\n else:\r\n print(\"Sorry you entered more than 1 wrong answer continously!!\")\r\n break\r\nif(correct < wrong):\r\n print(\"Hello! \",name,\" you enetered \",correct,\" correct answer and \",wrong,\" wrong answer\")\r\n print(\"You fail ! try again next time \\nBye\")\r\nelse:\r\n totalscore = correct - wrong \r\n print(\"Hello! \",name,\" you enetered \",correct,\" correct answer and \",wrong,\" wrong answer\")\r\n print(\"Your totale score: \",totalscore)\r\n print(\"See you again!! 
\\nBye\")\r\n\r\n \r\n ","repo_name":"Jashaann/Movie-Guessing-Game","sub_path":"guessgame.py","file_name":"guessgame.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6772135836","text":"#(c) 2012 Massachusetts Institute of Technology. All Rights Reserved\n# Code written by: Maksim Imakaev (imakaev@mit.edu)\n\n\"\"\"\nSome important utilities from Max. This includes:\n\nSet exception hook to pdb\nRun in separate process\nfork-map\nfork-map-reduce\nfork-map-average\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport sys\nimport pickle\nimport pdb\nimport traceback\nimport warnings\nimport subprocess\nimport numpy as np\nfrom copy import copy\nimport logging\nfrom functools import reduce\nlog = logging.getLogger(__name__)\n\n\ndef commandExists(command):\n \"checks if the bash command exists\"\n command = command.split()[0]\n if subprocess.call(['which', command]) != 0:\n return False\n return True\n\ndef gzipWriter(filename, pigzArguments=(\"-4\",)):\n \"\"\"\n creates a writing process with gzip or parallel gzip (pigz) attached to it\n \"\"\"\n filename = os.path.abspath(filename)\n with open(filename, 'wb') as outFile:\n if commandExists(\"pigz\"):\n writer = [\"pigz\", \"-c\"] + list(pigzArguments)\n else:\n writer = [\"gzip\", \"-c\", \"-1\"]\n warnings.warn(\"Please install 'pigz' parallel gzip for faster speed\")\n\n pwrite = subprocess.Popen(writer, stdin=subprocess.PIPE, stdout=outFile, shell=False, bufsize=-1)\n log.info(\"\"\"Writer created with command \"{0}\" \"\"\".format(writer))\n return pwrite\n\ndef _exceptionHook(infoType, value, tb):\n \"Exception hook\"\n traceback.print_exception(infoType, value, tb)\n print()\n pdb.post_mortem(tb)\n\n\ndef setExceptionHook():\n \"sets exception hook to pdb\"\n sys.excepthook = _exceptionHook\n\n\nclass transparentDict(dict): # transparent dictionary, that returns the key\n def __missing__(self, key):\n return key\n\n\ndef run_in_separate_process(func, *args, **kwds):\n pread, pwrite = os.pipe()\n pid = os.fork()\n if pid > 0:\n os.close(pwrite)\n with os.fdopen(pread, 'rb') as f:\n status, result = pickle.load(f)\n os.waitpid(pid, 0)\n if status == 0:\n return result\n else:\n raise result\n else:\n os.close(pread)\n try:\n result = func(*args, **kwds)\n status = 0\n except Exception as exc:\n result = exc\n status = 1\n with os.fdopen(pwrite, 'wb') as f:\n try:\n pickle.dump((status, result), f, pickle.HIGHEST_PROTOCOL)\n except pickle.PicklingError as exc:\n pickle.dump((2, exc), f, pickle.HIGHEST_PROTOCOL)\n os._exit(0)\n\n\ndef deprecate(newFunction, oldFunctionName=None, message=None):\n \"\"\"If you rename your function, you can use this to issue deprecation warning for the old name\n Juse use newFunction = deprecate(oldFunction)\"\"\"\n try:\n newName = newFunction.__name__\n except:\n newName = \"_UndeterminedName_\"\n if oldFunctionName is None:\n oldFunctionName = \"_UnspecifiedName_\"\n if message == None:\n message = \"Function %s was renamed to %s\" % (\n oldFunctionName, newName)\n\n def oldFunction(*args, **kwargs):\n warnings.warn(message)\n return newFunction(*args, **kwargs)\n return oldFunction\n\ndef _nprocessors():\n if sys.platform == 'darwin':\n try:\n from multiprocessing import cpu_count\n return cpu_count()\n except NotImplementedError:\n pass\n else:\n # Cygwin (Windows) and Linuxes\n # Could try sysconf(_SC_NPROCESSORS_ONLN) (LSB) next. 
Instead, count processors in cpuinfo.\n try:\n s = open('/proc/cpuinfo', 'r').read()\n return s.replace(' ', '').replace('\\t', '').count('processor:')\n except:\n pass\n return 4\n\nnproc = _nprocessors()\n\ndef fmap(f, *a, **kw):\n \"\"\"\n forkmap.map(..., n=nprocessors), same as map(...).\n n must be a keyword arg; default n is number of physical processors.\n \"\"\"\n n = max([kw.get(i, 0) for i in ['n','N', \"nproc\", \"Nproc\", \"NProc\"]])\n if n == 0:\n n = nproc\n\n if n == 1:\n return list(map(f, *a))\n\n L = list(zip(*a))\n n = min(n, len(L))\n\n ans = [None] * len(L)\n pipes = [os.pipe() for i in range(n - 1)]\n\n for i in range(n):\n if i < n - 1 and not os.fork(): # Child, and not last processor\n try:\n try:\n obj = [f(*x) for x in L[i::n]]\n except Exception as obj:\n pass\n with os.fdopen(pipes[i][1],'wb') as f:\n pickle.dump(obj,f, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n traceback.print_exc()\n finally:\n os._exit(0)\n elif i == n - 1: # parent\n try:\n ans[i::n] = [f(*x) for x in L[i::n]]\n for k in range(n - 1):\n with os.fdopen(pipes[k][0],'rb') as f:\n obj = pickle.load(f)\n if isinstance(obj, Exception):\n raise obj\n ans[k::n] = obj\n finally:\n for j in range(n - 1):\n os.wait()\n return ans\n\n\n\n\ndef _testFmap():\n\n for i in range(1, 300):\n print(i)\n a = list(range(i))\n for j in range(1, 10):\n b = fmap(lambda x:x, a, n=j)\n assert (np.array(a) == np.array(b)).all()\n\ndef _fmapredcount(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"fork-map-reduce\n Performs fork-map of function on data, automatically reducing the data inside each worker.\n If evaluation throws the exception from exceptionList, this results are simply ignored\n \"\"\"\n def funsum(x, y):\n \"\"\"reduces two x[0],y[0], keeping track of # of\n successful evaluations that were made\n Also keeps track of None's that can occur if evaluation failed\"\"\"\n if x is None:\n if y is None:\n return None\n else:\n return y\n else:\n if y is None:\n return x\n else:\n return (reduction(x[0], y[0]), x[1] + y[1])\n\n def newfunction(x):\n try:\n \"if function is evaluated, it was evaluated one time\"\n return function(x), 1\n except tuple(exceptionList):\n return None\n\n if len(data) < n:\n n = len(data)\n datas = []\n\n for i in range(n):\n datas.append(copy(data[i::n])) # split like that if beginning and end of the array have different evaluation time\n\n def worker(dataList):\n dataList[0] = newfunction(dataList[0])\n return reduce(lambda z, y: funsum(z, newfunction(y)), dataList) # reducing newfunction with our new reduction algorithm\n\n reduced = fmap(worker, datas, n=n)\n return reduce(funsum, reduced)\n\n\ndef fmapred(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"reduces two x[0],y[0], keeping track of # of\n successful evaluations that were made\n Also ignores failed evaluations with exceptions from exceptionList.\n\n Parameters\n ----------\n function : function\n function to be applied to the data\n data : iterable\n input data\n reduction : function, optional\n Reduction function. By default - sum\n n : int, optional\n number of CPUs\n exceptionList : list, optional\n list of exceptions to be ignored during reduction. 
By default, only IOError is ignored.\n \"\"\"\n return _fmapredcount(function, data, reduction=reduction, n=n, exceptionList=exceptionList)[0]\n\n\ndef fmapav(function, data, reduction=lambda x, y: x + y, n=4, exceptionList=[IOError]):\n \"\"\"Calculates averate of [fucntion(i) for i in data]\n Also ignores failed evaluations with exceptions from exceptionList.\n\n Parameters\n ----------\n function : function\n function to be applied to the data\n data : iterable\n input data\n reduction : function, optional\n Reduction function. By default - sum\n n : int, optional\n number of CPUs\n exceptionList : list, optional\n list of exceptions to be ignored during reduction. By default, only IOError is ignored.\n \"\"\"\n\n a = _fmapredcount(function, data, reduction=reduction, n=n,\n exceptionList=exceptionList)\n return a[0] / float(a[1])\n","repo_name":"wangyibin/TDGP","sub_path":"apps/systemutils.py","file_name":"systemutils.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"37003039096","text":"import random\nfrom Card import Card\n\n# define global values and rank for cards\nSuits = ('Spades', 'Hearts', 'Clubs' , 'Diamonds')\nRanks = ('ACE', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K')\nRank_Values = {'ACE':1, '2':2, '3':3 ,'4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10}\n\n\nclass Deck:\n\t'''\n\tThis class implements the deck of playing cards, including member function for initializing,\n\tshuffling, and dealing from a deck.\n\t'''\n\tdef __init__(self):\n\t\t'''\n\t\tCreates the standard deck of 52 cards\n\t\t'''\n\t\tself.cards = [Card(suit,rank) for suit in Suits for rank in Ranks] \n\t\trandom.shuffle(self.cards)\n\n\n\tdef card_shuffle(self):\n\t\t'''\n\t\tShuffle the cards in the deck.\n\t\t'''\n\t\trandom.shuffle(self.cards)\n\n\n\tdef deal_card(self):\n\t\t'''\n\t\tReturn the card from top of the deck.\n\t\t'''\n\t\ttry:\n\t\t\treturn self.cards.pop(0)\n\n\t\texcept IndexError as e:\n\t\t\tprint(\"The deck is out of cards: {e}\".format(e))\n\n\n\tdef __str__(self):\n\t\tcard_str = \"\"\n\t\tcard_str = [card_str+str(c) for c in self.cards]\n\t\treturn \"\".join(card_str)\n","repo_name":"bkpathak/blackjack","sub_path":"src/Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17615531213","text":"import Migrate\n\ndef addMenuItems( dmd, menuId, menuItems ):\n dsMenu = dmd.zenMenus._getOb( menuId, None) \n for menuItem in menuItems:\n if dsMenu and not dsMenu.zenMenuItems._getOb( menuItem['id'], None):\n dsMenu.manage_addZenMenuItem( **menuItem )\n\n\nclass removeTemplateMenuItem(Migrate.Step):\n version = Migrate.Version(3, 0, 0)\n \n def cutover(self, dmd):\n \n items = dmd.zenMenus._getOb('PerformanceMonitor_list').zenMenuItems\n if hasattr(items, 'performanceTemplates'): \n items._delObject('performanceTemplates')\n addMenuItems( dmd, 'PerformanceMonitor_list', [\n { 'action': 'rrdTemplates/PerformanceConf/viewRRDTemplate',\n 'description': 'Performance Template',\n 'id': 'performanceConf',\n 'isdialog': False,\n 'isglobal': True,\n 'ordering': 16.0,\n 'permissions': ('View Device',) } ] )\n\n\nremoveTemplateMenuItem()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/removeTemplateMenuItem.py","file_name":"removeTemplateMenuItem.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"17620583033","text":"from zope import interface, component\nfrom Products.Five.viewlet.manager import ViewletManagerBase\nfrom Products.ZenUtils.jsonutils import json\nfrom Products.Five.viewlet import viewlet\nfrom interfaces import ISecurityManager, IPermissionsDeclarationViewlet\nfrom AccessControl import getSecurityManager\nfrom Products.ZenUtils.guid.interfaces import IGlobalIdentifier\nfrom Products.Zuul.interfaces import IAuthorizationTool\nfrom collective.beaker.interfaces import ISession\n\nZAUTH_COOKIE = 'ZAuthToken'\n\nclass SecurityManager(ViewletManagerBase):\n \"\"\"The Viewlet manager class for the permissions declaration\n \"\"\"\n interface.implements(ISecurityManager)\n\n\ndef permissionsForContext(context):\n \"\"\"\n Given a context (zope object) returns all the permissions\n the logged in user has.\n \"\"\"\n manager = getSecurityManager()\n all_permissions = context.zport.acl_users.possible_permissions()\n\n # filter out the ones we have in this context\n valid_permissions = [permission for permission in all_permissions\n if manager.checkPermission(permission, context)]\n\n # turn the list into a dictionary to make it easier to look up on\n # the client side (just look up the key instead of iterating)\n perms = {}\n for permission in valid_permissions:\n perms[permission.lower()] = True\n return perms\n\nclass PermissionsDeclaration(viewlet.ViewletBase):\n \"\"\"This is responsible for sending to the client side\n which permissions the user has\n \"\"\"\n interface.implements(IPermissionsDeclarationViewlet)\n\n def render(self):\n \"\"\"Creates a global function in JavaScript that returns the\n json encoding of all the permissions available to the current\n user in the current context. The permissions will be in the\n form of a dictionary.\n \"\"\"\n self._setAuthorizationCookie()\n permissions = self.permissionsForCurrentContext()\n managedObjectGuids = self.getManagedObjectGuids(returnChildrenForRootObj=True)\n data = json(permissions)\n func = \"\"\"\n\n \"\"\" % (data, json(managedObjectGuids), str(self.hasGlobalRoles()).lower())\n return func\n\n def _setAuthorizationCookie(self):\n session = ISession(self.context.REQUEST)\n authorization = IAuthorizationTool(self.context)\n token = authorization.createAuthToken(self.request)\n\n self.request.response.setCookie(ZAUTH_COOKIE, token['id'], path=\"/\", secure=session.secure, http_only=True)\n\n def hasGlobalRoles(self):\n \"\"\"\n @return True/False if the user has global roles\n \"\"\"\n us = self.context.dmd.ZenUsers.getUserSettings()\n return not us.hasNoGlobalRoles()\n\n def permissionsForCurrentContext(self):\n \"\"\"Given a context return a list of all the permissions the logged in\n user has.\n \"\"\"\n return permissionsForContext(self.context)\n\n def getManagedObjectGuids(self, returnChildrenForRootObj=False):\n \"\"\"\n If the currently logged in user is a restricted user this will return\n all of the guids for items he can administer.\n \"\"\"\n guids = []\n us = self.context.dmd.ZenUsers.getUserSettings()\n if us.hasNoGlobalRoles():\n guids = us.getAllAdminGuids(returnChildrenForRootObj=returnChildrenForRootObj)\n return guids\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUI3/security/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
+{"seq_id":"74875765315","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom jobs.models import Job\nimport json\nfrom datetime import datetime, date\nimport dateparser\n\n\nclass Command(BaseCommand):\n help = 'Set up the database'\n\n def handle(self, *args: str, **options: str):\n with open('static/210521chambajobs.json', 'r') as handle:\n big_json = json.loads(handle.read())\n for item in big_json:\n if len(item['description']) == 0:\n print('Not created. Description empty')\n continue\n\n if item['publication_date'] != None:\n dt = dateparser.parse(item['publication_date'])\n else:\n dt = datetime.now()\n\n new_date = date(dt.year, dt.month, dt.day)\n\n existing_job = Job.objects.filter(\n\n job_title = item['job_title'],\n company = item['company'],\n company_url = item['company_url'],\n description = item['description'],\n publication_date = new_date,\n salary = item['salary'],\n city = item['city'],\n district = item['district'],\n job_url = item['job_url'],\n job_type = item['job_type'],\n\n )\n if existing_job.exists():\n print('This Job already exist')\n else:\n Job.objects.create(\n\n job_title = item['job_title'],\n company = item['company'],\n company_url = item['company_url'],\n description = item['description'],\n publication_date = new_date,\n salary = item['salary'],\n city = item['city'],\n district = item['district'],\n job_url = item['job_url'],\n job_type = item['job_type'],\n\n )\n\n self.stdout.write(self.style.SUCCESS('added jobs!'))\n","repo_name":"dgpb/jobarts","sub_path":"jobs/management/commands/addjobs.py","file_name":"addjobs.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4525189528","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\n# to add app_name= \" something \" then add the namespace=\" something \" in url of the app in root directory urls.py page\r\napp_name = 'polls'\r\nurlpatterns = [\r\n # :8000/list/ not working gives error 404 not found.\r\n # works with :8000/polls/list/\r\n path('list/', views.polls_list, name='list'),\r\n\r\n # to add new poll in polls list.\r\n path('new/', views.new_poll, name='new'),\r\n\r\n # to edit the existing poll in polls list. (polls/edit/1/)\r\n path('edit/', views.edit_poll, name='edit_poll'),\r\n\r\n # to delete poll\r\n path('delete/poll/', views.delete_poll, name='delete_poll'),\r\n\r\n # to add new choice\r\n path('edit//choice/add/', views.add_choice, name='add_choice'),\r\n\r\n # to edit choice\r\n path('edit/choice//', views.edit_choice, name='edit_choice'),\r\n\r\n # to delete a choice\r\n path('delete/choice//', views.delete_choice, name='delete_choice'),\r\n\r\n # for polls/details/1/ note:it will give details about polls question\r\n path('details//', views.poll_detail, name='detail'),\r\n\r\n # for form action after voting\r\n # polls/details/1/vote/\r\n path('details//vote/', views.poll_vote, name='vote'),\r\n\r\n]\r\n","repo_name":"hedagaurav/PollMe","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21991798063","text":"import unittest\nimport Problem012\n#from Problem021 import AmicableNumbers\n\nclass TriangleNumbersTest(unittest.TestCase):\n\tdef testgetNumForDivCnt(self):\n\t\tp = Problem012.TriangleNumbers()\n\t\tself.assertEqual(p.getNumForDivCnt(6), (28, [28.0, 14.0, 7.0, 4.0, 2.0, 1.0]))\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"vrednyydragon/ProjectEuler","sub_path":"py_src/Problem012_test.py","file_name":"Problem012_test.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37122353918","text":"from utils import *\nfrom nbnn import *\nfrom nbnn.voc import *\nimport sys, cPickle\nfrom ConfigParser import RawConfigParser\n\nif __name__ == '__main__':\n # Get config settings\n if len(sys.argv) < 5:\n raise Exception(\"arguments expected: cfgfile batch_no class\")\n configfile = sys.argv[1]\n tmpfile = sys.argv[2]\n batch_no = int(sys.argv[3])\n cls = sys.argv[4]\n \n VOCopts = VOC.fromConfig(configfile)\n DESCRopts, NBNNopts, TESTopts = getopts(configfile, tmpfile)\n\n # Setup logger\n if batch_no == 1:\n mode = 'w'\n else:\n mode = 'a'\n log = init_log(TESTopts['log_path'], cls, mode)\n \n log.info(\"TEST cfg:%s, batch_no:%d, cls:%s\",configfile, batch_no,cls)\n\n log.info('==== LOAD IMAGE PICKLE ====')\n with open(TESTopts['img_pickle_path']%batch_no,'rb') as pklf:\n images = cPickle.load(pklf)\n \n log.info('==== INIT DESCRIPTOR FUNCTION ====')\n descriptor_function = descriptor.DescriptorUint8(**DESCRopts)\n log.info('==== INIT ESTIMATOR ====')\n estimator = nbnn.NBNNEstimator(**NBNNopts)\n \n log.info('==== LOAD IMAGE DESCRIPTORS ====')\n descriptors = get_image_descriptors(images, descriptor_function, \\\n TESTopts['descriptor_path'])\n log.info('==== GET ESTIMATES ====')\n distances = estimator.get_estimates([cls], [d for p,d in descriptors.values()])\n log.info('==== GET CONFIDENCE VALUES ====')\n conf_vals = get_confidence_values(distances)\n log.info('== SAVE CONFIDENCE VALUES ==')\n save_results_to_file(TESTopts['result_path']%cls, images, conf_vals)\n \n","repo_name":"mvandervelden/Masterthesis","sub_path":"voc_cls_test.py","file_name":"voc_cls_test.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"28419865102","text":"# -*- coding:utf-8 -*-\nimport argparse\nimport os\nfrom datetime import datetime\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom math import ceil\n#from scipy import interpolate\nimport cv2\n\n# PyTorch\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torchvision # 画像処理関連\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom tensorboardX import SummaryWriter\n\n# 自作クラス\nfrom networks import ProgressiveGenerator, ProgressiveDiscriminator\nfrom utils import save_checkpoint, load_checkpoint\nfrom utils import board_add_image, board_add_images\nfrom utils import save_image_historys_gif\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--exper_name\", default=\"PGGAN_train\", help=\"実験名\")\n parser.add_argument('--device', choices=['cpu', 'gpu'], default=\"gpu\", help=\"使用デバイス (CPU or GPU)\")\n #parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') \n parser.add_argument('--dataset', choices=['mnist', 'cifar-10'], default=\"mnist\", help=\"データセットの種類(MNIST or CIFAR-10)\")\n parser.add_argument('--dataset_dir', type=str, default=\"dataset\", help=\"データセットのディレクトリ\")\n parser.add_argument('--results_dir', type=str, default=\"results\", help=\"生成画像の出力ディレクトリ\")\n parser.add_argument('--load_checkpoints_dir', type=str, default=\"\", help=\"モデルの読み込みディレクトリ\")\n parser.add_argument('--n_samplings', type=int, default=100, help=\"サンプリング数\")\n parser.add_argument('--batch_size', type=int, default=63, help=\"バッチサイズ\")\n parser.add_argument(\"--init_image_size\", type = int, default = 4 )\n parser.add_argument(\"--final_image_size\", type = int, default = 32 )\n parser.add_argument('--n_input_noize_z', type=int, default=128, help=\"生成器に入力するノイズ z の次数\")\n parser.add_argument(\"--fps\", type=float, default=30.0, help=\"モーフィング動画のFPS\")\n parser.add_argument('--codec', choices=['mp4','gif'], default=\"mp4\", help=\"動画のコーデック\")\n parser.add_argument(\"--seed\", type=int, default=0, help=\"乱数シード値\")\n parser.add_argument('--debug', action='store_true', help=\"デバッグモード有効化\")\n args = parser.parse_args()\n\n # 実行条件の出力\n print( \"----------------------------------------------\" )\n print( \"実行条件\" )\n print( \"----------------------------------------------\" )\n print( \"開始時間:\", datetime.now() )\n print( \"PyTorch version :\", torch.__version__ )\n for key, value in vars(args).items():\n print('%s: %s' % (str(key), str(value)))\n\n # 実行 Device の設定\n if( args.device == \"gpu\" ):\n use_cuda = torch.cuda.is_available()\n if( use_cuda == True ):\n device = torch.device( \"cuda\" )\n #torch.cuda.set_device(args.gpu_ids[0])\n print( \"実行デバイス :\", device)\n print( \"GPU名 :\", torch.cuda.get_device_name(device))\n print(\"torch.cuda.current_device() =\", torch.cuda.current_device())\n else:\n print( \"can't using gpu.\" )\n device = torch.device( \"cpu\" )\n print( \"実行デバイス :\", device)\n else:\n device = torch.device( \"cpu\" )\n print( \"実行デバイス :\", device)\n\n print('-------------- End ----------------------------')\n\n # 各種出力ディレクトリ\n if not( os.path.exists(args.results_dir) ):\n os.mkdir(args.results_dir)\n if not( os.path.exists(os.path.join(args.results_dir, args.exper_name)) ):\n os.mkdir( os.path.join(args.results_dir, args.exper_name) )\n\n # seed 値の固定\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n 
#======================================================================\n # データセットを読み込み or 生成\n # データの前処理\n #======================================================================\n pass\n\n #======================================================================\n # モデルの構造を定義する。\n #======================================================================\n # Genrator\n if( args.dataset == \"mnist\" ):\n model_G = ProgressiveGenerator(\n init_image_size = args.init_image_size,\n final_image_size = args.final_image_size,\n n_input_noize_z = args.n_input_noize_z,\n n_rgb = 1,\n ).to( device )\n else:\n model_G = ProgressiveGenerator(\n init_image_size = args.init_image_size,\n final_image_size = args.final_image_size,\n n_input_noize_z = args.n_input_noize_z,\n n_rgb = 3,\n ).to( device )\n\n # Discriminator\n if( args.dataset == \"mnist\" ):\n model_D = ProgressiveDiscriminator(\n init_image_size = args.init_image_size,\n final_image_size = args.final_image_size,\n n_fmaps = args.n_input_noize_z,\n n_rgb = 1,\n ).to( device )\n else:\n model_D = ProgressiveDiscriminator( \n init_image_size = args.init_image_size,\n final_image_size = args.final_image_size,\n n_fmaps = args.n_input_noize_z,\n n_rgb = 3,\n ).to( device )\n \n if( args.debug ):\n print( \"model_G :\\n\", model_G )\n print( \"model_D :\\n\", model_D )\n\n # モデルを読み込む\n if not args.load_checkpoints_dir == '' and os.path.exists(args.load_checkpoints_dir):\n init_step = load_checkpoint(model_G, device, os.path.join(args.load_checkpoints_dir, \"G\", \"G_final.pth\") )\n init_step = load_checkpoint(model_D, device, os.path.join(args.load_checkpoints_dir, \"D\", \"D_final.pth\") )\n\n #======================================================================\n # モデルの学習処理\n #======================================================================\n # 入力ノイズ z\n input_noize_z1 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n input_noize_z2 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n #input_noize_z3 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n #input_noize_z4 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n #input_noize_z5 = torch.randn( size = (args.batch_size, args.n_input_noize_z,1,1) ).to( device )\n\n #\n final_progress = float(np.log2(args.final_image_size)) -2\n\n #======================================================================\n # モーフィング(z1 -> z2)\n #======================================================================\n input_noize_z_src = input_noize_z1\n input_noize_z_target = input_noize_z2\n dz = ( input_noize_z_target - input_noize_z_src ) / args.n_samplings\n if( args.debug ):\n print( \"input_noize_z_src[0,0:10,0,0]\", input_noize_z_src[0,0:10,0,0] )\n print( \"input_noize_z_target[0,0:10,0,0]\", input_noize_z_target[0,0:10,0,0] )\n print( \"dz[0,0:10,0,0]\", dz[0,0:10,0,0] )\n\n print(\"Starting Test Loop...\")\n n_print = 1\n model_G.eval()\n model_D.eval()\n for step in tqdm( range(args.n_samplings+1), desc = \"Samplings\" ):\n # 入力ノイズを線形補間\n input_noize_z = input_noize_z_src + step * dz\n\n # 生成器 G の 推論処理\n with torch.no_grad():\n # G(z) : 生成器から出力される偽物画像\n G_z = model_G( input_noize_z, final_progress )\n\n # 出力画像の生成&保存\n save_image( tensor = G_z, filename = os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format( step ) )\n n_print -= 1\n\n print(\"Finished Test Loop.\")\n\n #======================================================================\n # 
生成した連番画像を動画化\n #======================================================================\n if( args.codec == \"gif\"):\n frames = []\n for step in range(args.n_samplings+1):\n img = Image.open( os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format(step) )\n frames.append(img)\n\n frames[0].save(\n os.path.join(args.results_dir, args.exper_name) + \"/morphing_video.gif\",\n save_all=True,\n append_images=frames[1:]\n )\n\n else:\n img = cv2.imread(os.path.join(args.results_dir, args.exper_name) + \"/frame_0000.png\" )\n width, height = img.shape[1], img.shape[0]\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n video = cv2.VideoWriter( \n os.path.join(args.results_dir, args.exper_name) + \"/morphing_video.mp4\", \n fourcc, args.fps, \n (width, height)\n )\n\n for step in range(args.n_samplings+1):\n img = cv2.imread(os.path.join(args.results_dir, args.exper_name) + \"/frame_{0:04d}.png\".format(step))\n video.write(img)\n\n video.release()\n","repo_name":"Yagami360/MachineLearning_Exercises_Python_PyTorch","sub_path":"GAN_PGGAN_PyTorch/test_morphing.py","file_name":"test_morphing.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"}
+{"seq_id":"10621022804","text":"from image import Image\nimport threading\nfrom role import Role\nfrom villager_listener import VillagerListener\nfrom constant import Constant\nfrom debug_print import *\nfrom skill import Skill\nimport json\nimport pygame\nfrom land import Land\nfrom attack import AttackAnimation\nfrom house import House\nfrom item import Item\nfrom constant_image import ConstantImage\nimport random\n\n\nclass Villager(Image, threading.Thread):\n\n lock = threading.RLock()\n # for testing purpose only want to create one leader\n leader_taken = False\n \n\n def __init__(self, image, position, villager_id, font, listener, current_leader, skill_images):\n self.role = Role.FOLLOWER\n self.listener = listener\n self.current_leader = current_leader\n self.leadership_term = 0\n # render shouting\n self.message_count = 1\n # for testing to only create one leader\n self.skills = []\n self.skill_adding_list = []\n self.max_health = Constant.VILLAGER_MAX_HP\n self.current_health = self.max_health\n self.current_message = \"\"\n self.message_countdown = 0\n self.learned_skill_names = []\n self.turning_learned_skills_list = []\n self.dead = False\n self.dead_message_sent = False\n width, height = image.get_rect().size\n center_x, center_y = position\n super().__init__(image, center_x, center_y, height, width)\n self.villager_id = villager_id\n self.font = font\n self.attacked = False\n self.item = []\n self.attack = None\n\n\n self.land = Land(self, Constant.LAND_SIZE)\n\n self.house = None\n self.build_house_countdown = Constant.BUILD_HOUSE_COUNT_DOWN\n\n threading.Thread.__init__(self)\n\n self.attack_probability = 0.5\n self.attack_display_count_down = Constant.ATTACK_DISPLAY_COUNT_DOWN\n self.attack_display_count_down_const = Constant.ATTACK_DISPLAY_COUNT_DOWN\n self.attacked = False\n self.attack_power = 1\n\n self.skill_images = skill_images\n\n\n def pickTile(self, tile):\n \"\"\"\n \n Check which tile is clicked by mouse, and applied its benefits to Villager\n \n :param tile: Tile \n :return: \n \"\"\"\n if tile.mature:\n if tile.tile_type == Constant.TILE_TYPE_PLANT:\n self.current_health_up_with_amount(Constant.PLANT_HEALTH_INCREASE)\n elif tile.tile_type == Constant.TILE_TYPE_ANIMAL:\n self.current_health_up_with_amount(Constant.ANIMAL_HEALTH_INCREASE)\n tile.un_mature()\n\n def addHouse(self):\n \"\"\"\n \n Add a house Object to Villager\n \n \"\"\"\n self.house = House(self.x, self.y)\n\n # armour\n def addItemToLeftHand(self, image, item_name, image_scale):\n \"\"\"\n \n Adding a item to the left hand side of the villager\n \n :param image: Image \n :param item_name: str\n :param image_scale: int\n :return: \n \"\"\"\n width, height = image.get_rect().size\n temp_item_center_x = self.x + width * image_scale // 2\n temp_item_center_y = self.y + width * image_scale\n temp_item = Item(image, temp_item_center_x, temp_item_center_y, item_name, image_scale)\n self.item.append(temp_item)\n\n # sword\n def addItemToRightHand(self, image, item_name, image_scale):\n \"\"\"\n\n Adding a item to the right hand side of the villager\n\n :param image: Image \n :param item_name: str\n :param image_scale: int\n :return: \n \"\"\"\n width, height = image.get_rect().size\n temp_item_center_x = self.x - width * image_scale\n temp_item_center_y = self.y\n temp_item = Item(image, temp_item_center_x, temp_item_center_y, item_name, image_scale)\n self.item.append(temp_item)\n\n def being_attacked(self, hp_decrement):\n \"\"\"\n \n if villager is attcked set the hp down\n \n :param hp_decrement: 
int\n \n \"\"\"\n self.current_health_down_with_amount(hp_decrement)\n\n\n\n def add_skill(self, skill_name):\n \"\"\"\n \n Add skill Object to player's skill list\n \n :param skill_name: str \n \n \"\"\"\n skill_num = len(self.skills)\n image = self.skill_images[skill_name]\n\n # each row render four skill, then go up\n one_skill = Skill(skill_name, image, self.x - self.width/2 - ((image.get_rect().size)[0] * Constant.SKILL_IMAGE_SCALE_VILLAGER) / 2, (self.y + self.height/2) - (int (skill_num) * int((image.get_rect().size)[1] * Constant.SKILL_IMAGE_SCALE_VILLAGER)), Constant.SKILL_IMAGE_SCALE_VILLAGER, False)\n self.skills.append(one_skill)\n\n\n def run(self):\n while (not self.dead) and (not self.listener.stopped):\n # consuming the parsed JSON message from the queue\n request = self.listener.request_queue.get()\n\n request_type = request[Constant.MESSAGE_TYPE]\n # according to the type of the request applying corresponding methods to villager\n if request_type == Constant.VILLAGER_DEAD:\n self.dead = True\n continue\n if request_type == Constant.APPEND and self.role == Role.LEADER:\n if not request[Constant.NEW_ENTRIES]:\n self.reclaim_authority()\n elif request_type == Constant.LEADERSHIP:\n self.set_leadership(request)\n elif request_type == Constant.REQUEST_VOTE:\n self.set_candidate(request)\n elif request_type == Constant.REQUEST_VOTE_REPLY:\n self.vote(request)\n elif request_type == Constant.REQUEST_COMMAND_ACK and self.role == Role.LEADER:\n self.leader_receive_learn(request)\n elif request_type == Constant.APPEND_REPLY:\n self.learning_skill(request)\n elif request_type == Constant.COMMIT_INDEX:\n self.learned_skill(request)\n if self.current_health == 0:\n debug_print(\"Villager\" + str(self.villager_id) + \" is dead\")\n self.dead = True\n\n if self.listener.stopped:\n print(str(self.villager_id) + \"'s listener is dead\")\n self.dead = True\n if self.dead:\n # if dead send the JSON to cooresponding remote Raft peer to ask it to terminate\n data = {Constant.MESSAGE_TYPE: \"villager_killed\", Constant.PEER_ID: self.listener.peer_id}\n try:\n self.listener.socket.sendall(str.encode(json.dumps(data) + \"\\n\"))\n print(\"villager killed message sent\")\n except ConnectionResetError:\n print(\"connection dead\")\n print(\"villager killed message sent\")\n self.listener.stop_listener()\n self.dead_message_sent = True\n\n\n def reclaim_authority(self):\n \"\"\"\n Display a dialgue box to show the string 'I'm the leader'\n \n \"\"\"\n self.set_message(Constant.AUTHORITY_MESSAGE)\n\n def set_leadership(self, request):\n \"\"\"\n \n trying to set the leader by this leadership request dictionary\n \n :param request: dict\n \"\"\"\n term = request[Constant.SENDER_TERM]\n # if there is still a leader and the term number of JSON messag is smaller than\n # current term ignore this outdated leader messge\n if self.current_leader and self.current_leader.leadership_term > term:\n return\n self.role = Role.LEADER\n self.leadership_term = term\n self.set_message(Constant.NEW_LEADER_MESSAGE)\n\n def set_candidate(self, request):\n \"\"\"\n \n set villager to candidate by this request_vote request\n \n :param request: dict\n \"\"\"\n term = request[Constant.SENDER_TERM]\n # abort the outdated request_vote message\n if self.current_leader and self.current_leader.leadership_term > term:\n return\n self.role = Role.CANDIDATE\n self.set_message(Constant.CANDIDATE_MESSAGE)\n\n def vote(self, request):\n term = request[Constant.SENDER_TERM]\n if self.current_leader and 
self.current_leader.leadership_term > term:\n return\n vote_for = request[Constant.VOTE_PEER_ID][4:]\n debug_print(type(request[Constant.VOTE_GRANTED]))\n if request[Constant.VOTE_GRANTED] == True:\n self.set_message(Constant.VOTE_MESSAGE.format(vote_for))\n\n def leader_receive_learn(self, request):\n skill_name = request[Constant.REQUEST_COMMAND_LIST][0]\n index = int(request[Constant.INDEX])\n if index == len(self.skills):\n self.add_skill(skill_name)\n while self.skill_adding_list:\n length = len(self.skills)\n if self.skill_adding_list[0][0] == length:\n skill = self.skill_adding_list.pop(0)\n self.add_skill(skill[1])\n else:\n break\n elif index > len(self.skills):\n self.skill_adding_list.append((index, skill_name))\n self.skill_adding_list.sort()\n\n def learning_skill(self, request):\n result = request[Constant.APPEND_RESULT]\n if result and self.current_leader:\n index = int(request[Constant.LAST_LOG_INDEX])\n if (index >= len(self.skills)) and (index < len(self.current_leader.skills)):\n for i in range(len(self.skills), index + 1):\n self.add_skill(self.current_leader.skills[i].skill_name)\n\n def learned_skill(self, request):\n debug_print(\"in learned_skill\")\n if not request:\n while self.turning_learned_skills_list and self.turning_learned_skills_list[0][0] == len(\n self.learned_skill_names):\n\n skill = self.turning_learned_skills_list.pop(0)\n self.learned_skill(skill[1])\n return\n index = int(request[Constant.INDEX])\n debug_print(\"index is\" + str(index))\n debug_print(\"skills: \")\n debug_print(self.skills)\n\n if (index < len(self.skills)) and (index == len(self.learned_skill_names)):\n skill_name = self.skills[index].skill_name\n debug_print(\"skill name: \" + skill_name)\n else:\n while self.turning_learned_skills_list and self.turning_learned_skills_list[0][0] == len(self.learned_skill_names):\n skill = self.turning_learned_skills_list.pop(0)\n self.learned_skill(skill[1])\n self.turning_learned_skills_list.append((index, request))\n self.turning_learned_skills_list.sort()\n debug_print(\"returned in else\")\n return\n if skill_name not in Constant.SKILLS:\n debug_print(\"not in skills\")\n return\n if skill_name == Constant.ARMOUR:\n self.addItemToLeftHand(ConstantImage.ARMOUR_IMAGE_SPRITE,Constant.ITEM_NAME_ARMOUR ,Constant.ARMOUR_IMAGE_SCLAE)\n elif skill_name == Constant.SWORD:\n self.addItemToRightHand(ConstantImage.SWORD_IMAGE_SPRITE, Constant.ITEM_NAME_SWORD, Constant.SWORD_IMAGE_SCALE)\n elif skill_name == Constant.ANIMAL:\n for tile in self.land.tiles:\n if tile.tile_type == Constant.TILE_TYPE_ANIMAL:\n tile.display_plant_or_animal = True\n elif skill_name == Constant.PLANT:\n for tile in self.land.tiles:\n if tile.tile_type == Constant.TILE_TYPE_PLANT:\n tile.display_plant_or_animal = True\n elif skill_name == Constant.HOUSE:\n self.addHouse()\n self.skills[index].greyed = False\n self.skills[index].applied = True\n debug_print(\"set skill greyed false\")\n self.learned_skill_names.append(skill_name)\n\n def set_message(self, message):\n self.current_message = message\n self.message_countdown = Constant.MESSAGE_TIME\n\n\n def max_health_up(self):\n self.max_health += 1\n\n def max_health_down(self):\n self.max_health -= 1\n\n def current_health_up(self):\n self.current_health += 1\n\n def current_health_down(self):\n self.current_health -= 1\n\n\n def current_health_up_with_amount(self, hp_increment):\n self.current_health += hp_increment\n if self.current_health > self.max_health:\n self.current_health = self.max_health\n\n\n def 
attack_monster_or_not(self, monster):\n\n if (Constant.SWORD not in self.learned_skill_names) or self.attacked or \\\n (Constant.SWORD in self.learned_skill_names):\n return\n\n self.attacked = random.random() >= self.attack_probability\n\n if self.attacked and self.attack_power > 0 :\n monster.set_attack(self.attack_power)\n self.attack = AttackAnimation(ConstantImage.VILLAGER_ATTACK_IMAGE_SPRITE, monster.x, monster.y, Constant.VILLAGER_ATTACK_IMAGE_SCALE)\n self.attack_display_count_down = self.attack_display_count_down_const\n\n else:\n self.attacked = False\n\n\n def current_health_down_with_amount(self, hp_decrement):\n\n if self.house is not None and self.house.display_house:\n self.house.house_durability_decrement_with_amount(hp_decrement)\n if self.house.current_durability <= 0:\n self.house.display_house = False\n return\n\n if Constant.ARMOUR in self.learned_skill_names:\n hp_decrement -= Constant.ITEM_ARMOUR_DEFEND_POWER_ADD\n\n if hp_decrement >= self.current_health:\n self.current_health = 0\n self.dead = True\n return\n self.current_health -= hp_decrement\n\n def build_house(self):\n if self.house:\n if not self.house.display_house:\n if self.build_house_countdown == Constant.BUILD_HOUSE_COUNT_DOWN:\n self.set_message(Constant.BUILD_HOUSE_MESSAGE)\n self.build_house_countdown -= 1\n elif self.build_house_countdown > 0:\n self.build_house_countdown -= 1\n else:\n self.house.display_house = True\n self.build_house_countdown = Constant.BUILD_HOUSE_COUNT_DOWN\n\n def render_attack(self, screen):\n if self.attacked and self.attack_display_count_down != 0:\n self.attack.render(screen)\n self.attack_display_count_down -= 1\n if self.attack_display_count_down <= 0:\n self.attacked = False\n\n def render(self, screen):\n\n if self.house and self.house.display_house:\n self.house.render(screen)\n\n super().render(screen)\n\n for one_skill in self.skills:\n one_skill.render(screen)\n\n self.land.render(screen)\n name = self.font.render(\"Villager \" + str(self.villager_id), 1, Constant.BLACK)\n screen.blit(name, (self.x - name.get_width() // 2, self.y + self.height // 2))\n\n if self.role != Role.FOLLOWER:\n if self.role == Role.LEADER:\n title = \"Leader\"\n role = self.font.render(title, 1, Constant.BLACK)\n screen.blit(role, (self.x - role.get_width() // 2, self.y + self.height // 2 + role.get_height() + 2))\n\n if self.message_countdown > 0:\n message = self.font.render(self.current_message, 1, Constant.BLACK)\n screen.blit(message, (self.x - message.get_width() // 2, self.y - self.height // 2 - message.get_height() - 2))\n self.message_countdown -= 1\n\n pygame.draw.rect(screen, Constant.GRAY, pygame.Rect((self.x - self.width // 2,\n self.y - self.height // 2),\n (self.width, Constant.HEAL_BAR_HEIGHT)))\n pygame.draw.rect(screen, Constant.RED, pygame.Rect((self.x - self.width // 2,\n self.y - self.height // 2),\n (self.width * (self.current_health / self.max_health),\n Constant.HEAL_BAR_HEIGHT)))\n\n for one_item in self.item:\n one_item.render(screen)\n\n","repo_name":"Dawindmill/Raft-In-Python","sub_path":"Visualization/villager.py","file_name":"villager.py","file_ext":"py","file_size_in_byte":16318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13553211571","text":"#! /usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport time\nimport csv\n\nprev_frame_time = 0\n\ndef callback(msg):\n\n roll = pitch = yaw = 0.0\n orientation_q = msg.pose.pose.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (roll, pitch, yaw) = euler_from_quaternion (orientation_list)\n \n position_q = msg.pose.pose.position\n \n pos = [position_q.x, position_q.y, yaw]\n \n print(pos)\n \n global prev_frame_time\n new_frame_time = rospy.get_time()\n if (new_frame_time-prev_frame_time > 0):\n fps = str(round( 1/(new_frame_time-prev_frame_time), 2 ))\n else:\n fps = 'inf'\n prev_frame_time = new_frame_time\n logfps = 'odom rate: '+fps+' Hz'\n rospy.loginfo(logfps)\n \nrospy.init_node('odom')\nsub = rospy.Subscriber('/odom', Odometry, callback)\nrospy.spin()\n","repo_name":"jakabfarkas/ros_cone_detection","sub_path":"scripts/odom_subscriber.py","file_name":"odom_subscriber.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31299840913","text":"__author__ = 'mike'\nimport threading\nimport queue\n\n'''\nGlobally defined variables and functions go here\n'''\n\n\ndef init():\n # we need a globally accessible queue\n global lcdQueue\n lcdQueue = queue.Queue()\n global queueLock\n queueLock = threading.Lock() # queue is not thread safe without this\n","repo_name":"mikemeding/WeatherAppRPI","sub_path":"rpiweather/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26158863484","text":"\"\"\"S3 Connector module.\"\"\"\n\n###############################################################################\n# IMPORTS ########################################################### IMPORTS #\n###############################################################################\n\n# Standard library\nimport dataclasses\nimport logging\nimport traceback\n\n# Installed\nimport boto3\nimport botocore\n\n# Own modules\nimport dds_cli.utils\nfrom dds_cli import DDSEndpoint\n\n###############################################################################\n# LOGGING ########################################################### LOGGING #\n###############################################################################\n\nLOG = logging.getLogger(__name__)\n\n###############################################################################\n# CLASSES ########################################################### CLASSES #\n###############################################################################\n\n\n@dataclasses.dataclass\nclass S3Connector:\n \"\"\"Connect to Simple Storage Service.\"\"\"\n\n project_id: dataclasses.InitVar[str]\n token: dataclasses.InitVar[dict]\n safespring_project: str = dataclasses.field(init=False)\n keys: dict = dataclasses.field(init=False)\n url: str = dataclasses.field(init=False)\n bucketname: str = dataclasses.field(init=False)\n resource = None\n\n def __post_init__(self, project_id, token):\n \"\"\"Initiate S3Connector object by getting s3 info from API.\"\"\"\n (\n self.safespring_project,\n self.keys,\n self.url,\n self.bucketname,\n ) = self.__get_s3_info(project_id=project_id, token=token)\n\n # @connect_cloud\n def __enter__(self):\n \"\"\"Enter context.\"\"\"\n self.resource = self.connect()\n\n return self\n\n def __exit__(self, exc_type, exc_value, traceb):\n \"\"\"Close context manager, incl. 
connection.\"\"\"\n if exc_type is not None:\n traceback.print_exception(exc_type, exc_value, traceb)\n return False # uncomment to pass exception through\n\n return True\n\n def connect(self):\n \"\"\"Connect to S3 resource.\"\"\"\n # Connect to service\n try:\n session = boto3.session.Session()\n\n resource = session.resource(\n service_name=\"s3\",\n endpoint_url=self.url,\n aws_access_key_id=self.keys[\"access_key\"],\n aws_secret_access_key=self.keys[\"secret_key\"],\n )\n except (boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError) as err:\n LOG.warning(\"S3 connection failed: %s\", err)\n raise\n\n LOG.debug(\"Connected to S3.\")\n return resource\n\n # Static methods ############ Static methods #\n @staticmethod\n def __get_s3_info(project_id, token):\n \"\"\"Get information required to connect to cloud.\"\"\"\n # Perform request to API\n s3info, _ = dds_cli.utils.perform_request(\n DDSEndpoint.S3KEYS,\n method=\"get\",\n params={\"project\": project_id},\n headers=token,\n error_message=\"Failed to get cloud information\",\n )\n\n # Get s3 info\n\n safespring_project, keys, url, bucket = (\n s3info.get(\"safespring_project\"),\n s3info.get(\"keys\"),\n s3info.get(\"url\"),\n s3info.get(\"bucket\"),\n )\n if None in [safespring_project, keys, url, bucket]:\n raise SystemExit(\"Missing safespring information in response.\") # TODO: change\n\n return safespring_project, keys, url, bucket\n","repo_name":"ScilifelabDataCentre/dds_cli","sub_path":"dds_cli/s3_connector.py","file_name":"s3_connector.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"23440925271","text":"import sys\r\n\r\ndef solveCase( infile ):\r\n\tanswer = \"\"\r\n\t\r\n\tstring_count = int( infile.readline().strip() )\r\n\tstrings = []\r\n\t\r\n\tfor i in range(string_count):\r\n\t\tstrings.append( infile.readline().strip() )\r\n\t\t\t\r\n\t#check if all the same string\r\n\tif len( set(strings) ) == 1:\r\n\t\treturn 0\r\n\t\t\r\n\tletter_counts = []\r\n\tfor s in strings:\r\n\t\tlc = []\r\n\t\twhile( s != \"\" ):\r\n\t\t\tchar = s[0]\r\n\t\t\tcount = 1\r\n\t\t\ts = s[1:]\r\n\t\t\twhile s != \"\" and s[0] == char:\r\n\t\t\t\tcount += 1\r\n\t\t\t\ts = s[1:]\r\n\t\t\t\t\r\n\t\t\tlc.append( (char,count) )\r\n\t\t\t\r\n\t\tletter_counts.append( lc )\r\n\t\t\r\n\t#make sure same length\r\n\tif len( set( [len(x) for x in letter_counts] ) ) != 1:\r\n\t\treturn \"Fegla Won\"\r\n\t\t\r\n\tchanges = []\r\n\tfor i in range( len(letter_counts) ):\r\n\t\tchange = 0\r\n\t\t#assume we are choosing this string to change the others to. How many total changes?\r\n\t\tlc = letter_counts[i]\r\n\t\tfor j in range( len(lc) ):\r\n\t\t\t(char,count) = lc[j]\r\n\t\t\tprint( \"char {} count {}\\n\".format( char, count ) )\r\n\t\t\t\r\n\t\t\tfor k in range( len(letter_counts) ):\r\n\t\t\t\t(other_char, other_count) = letter_counts[k][j]\r\n\t\t\t\tprint( \"other_char {} other_count {}\\n\".format( other_char, other_count ) )\r\n\t\t\t\t\r\n\t\t\t\tif char != other_char:\r\n\t\t\t\t\treturn \"Fegla Won\"\r\n\t\t\t\t\t\r\n\t\t\t\tchange += abs( count - other_count )\r\n\t\t\t\t\r\n\t\tchanges.append( change )\r\n\t\tprint( \"change {}\\n\".format( change ) )\r\n\t\t\r\n\treturn min( changes )\r\n\t\t\r\n\t\r\n\t\t\r\n\t\r\n\r\ndef solve(infile):\r\n\toutput = \"\"\r\n\tt_count = int( infile.readline() )\r\n\t\r\n\tfor i in range( t_count ):\r\n\t\tprint( \"*** Test {} ***\\n\".format( i + 1 ) )\r\n\t\tanswer = solveCase( infile )\r\n\t\toutput += \"Case #{}: {}\\n\".format( i+1, answer )\r\n\t\t\r\n\treturn output.strip()\r\n\r\nif( __name__ == \"__main__\" ):\r\n\tinfile_name = sys.argv[1]\r\n\t\r\n\toutput = \"__null__\"\r\n\twith open( infile_name ) as f:\r\n\t\toutput = solve( f )\r\n\t\t\r\n\twith open( infile_name + \".out\", \"w\" ) as of:\r\n\t\tof.write( output )\r\n\t\r\n\texit(0)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_142/813.py","file_name":"813.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4316921725","text":"import numpy as np\nfrom SystemObjects.Nodes import Node\nfrom SystemObjects.Routs import Route\nfrom copy import deepcopy\n\ndef OneStepAnnealing(route1,route2,Temperature):\n if route1.cost > route2.cost:\n currRoute = route2\n else:\n base = np.random.random()\n compare = np.exp(-abs(route1.cost-route2.cost)/Temperature)\n #print(base,compare)\n #currRoute = route2 if base <= compare else currRoute = route1\n if base<=compare:\n currRoute = route2\n else:\n currRoute = route1\n return currRoute","repo_name":"kapsikarsuyog/TSP_Solution_With_MetaHeuristics","sub_path":"SimulatedAnnealing/SimulatedAnnealingFunctions.py","file_name":"SimulatedAnnealingFunctions.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14426073475","text":"\nimport sys\nimport configparser\n\nCONFIG_FN = \"main.cfg\"\n\ndata = configparser.ConfigParser()\nif len( data.read(CONFIG_FN,encoding='utf8') )!=1:\n\tprint( f\"Failed to read config file '{CONFIG_FN}'\" )\n\tsys.exit(1)\n","repo_name":"TasurtSPb/microelectronics","sub_path":"modules/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24744442661","text":"import logging\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom pupil_detectors import Detector2D, DetectorBase, Roi\r\nfrom pyglui import ui\r\n\r\nfrom methods import normalize\r\n\r\nfrom pupil_detector_plugins import available_detector_plugins\r\nfrom pupil_detector_plugins.detector_base_plugin import (\r\n PupilDetectorPlugin,\r\n)\r\nfrom pupil_detector_plugins.visualizer_2d import draw_pupil_outline\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass CustomDetector(PupilDetectorPlugin):\r\n uniqueness = \"by_class\"\r\n icon_font = \"pupil_icons\"\r\n icon_chr = chr(0xEC18)\r\n\r\n label = \"Custom Detector\"\r\n\r\n # Use the same identifier as the built-in 2D pupil detector\r\n identifier = \"2d\"\r\n order = 0.9\r\n\r\n @property\r\n def pretty_class_name(self):\r\n return \"Custom Detector\"\r\n\r\n @property\r\n def pupil_detector(self) -> DetectorBase:\r\n return self.__detector_2d\r\n\r\n def __init__(self, g_pool=None):\r\n super().__init__(g_pool=g_pool)\r\n self.__detector_2d = Detector2D({})\r\n self._stop_other_pupil_detectors()\r\n\r\n def _stop_other_pupil_detectors(self):\r\n plugin_list = self.g_pool.plugins\r\n\r\n # Deactivate other PupilDetectorPlugin instances\r\n for plugin in plugin_list:\r\n if isinstance(plugin, PupilDetectorPlugin) and plugin is not self:\r\n plugin.alive = False\r\n\r\n # Force Plugin_List to remove deactivated plugins\r\n plugin_list.clean()\r\n\r\n def detect(self, frame, **kwargs):\r\n\r\n debug_img = frame.bgr if self.g_pool.display_mode == \"algorithm\" else None\r\n\r\n frame_data = np.asarray(bytearray(frame.jpeg_buffer), dtype=np.uint8)\r\n frame_bgr = cv2.imdecode(frame_data, cv2.IMREAD_COLOR)\r\n\r\n # Convert to grayscale\r\n frame_gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)\r\n\r\n # Apply a blur to reduce noise\r\n blurred_frame = cv2.GaussianBlur(frame_gray, (7, 7), 0)\r\n\r\n # Use Canny Edge Detection\r\n edges = cv2.Canny(blurred_frame, 100, 200)\r\n\r\n # Find contours in the edge map\r\n contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n result = {'ellipse': None, 'diameter': None, 'location': None, 'confidence': 0, 'id': 0, 'topic': None,\r\n 'method': None, 'timestamp': None, 'norm_pos': None}\r\n\r\n if contours:\r\n for contour in contours:\r\n # Calculate the area of the contour\r\n area = cv2.contourArea(contour)\r\n\r\n # Filter out very small contours based on their area\r\n if area > 100:\r\n # Fit a circle to the contour\r\n (x, y), radius = cv2.minEnclosingCircle(contour)\r\n center = (int(x), int(y))\r\n radius = int(radius)\r\n\r\n # Draw the circle on the image\r\n cv2.circle(frame_bgr, center, radius, (0, 255, 0), 2)\r\n\r\n # Update the result\r\n result['ellipse'] = {'center': (x, y), 'axes': (radius, radius), 'angle': 0}\r\n result['diameter'] = radius * 2 # The diameter of the circle\r\n result['location'] = (x, y) # The center of the circle\r\n result['confidence'] = 1 # Confidence is set to 1 for now\r\n\r\n eye_id = self.g_pool.eye_id\r\n\r\n result[\"id\"] = eye_id\r\n result[\"topic\"] = f\"pupil.{eye_id}.{self.identifier}\"\r\n result[\"method\"] = \"custom-2d\"\r\n result[\"timestamp\"] = frame.timestamp\r\n if result['location'] is not None:\r\n result[\"norm_pos\"] = normalize(result[\"location\"], (frame.width, frame.height), flip_y=True)\r\n\r\n ##with open(r'C:\\Users\\L1303\\Desktop\\pupilSource\\output.txt', 'w') as f:\r\n ## f.write(str(result))\r\n\r\n return result\r\n\r\n def init_ui(self):\r\n 
super().init_ui()\r\n self.menu.label = self.pretty_class_name\r\n self.menu_icon.label_font = \"pupil_icons\"\r\n info = ui.Info_Text(\"Custom 2D Pupil Detector Plugin\")\r\n self.menu.append(info)\r\n\r\n def gl_display(self):\r\n if self._recent_detection_result:\r\n draw_pupil_outline(self._recent_detection_result, color_rgb=(0.3, 1.0, 0.1))\r\n","repo_name":"reddote/CustomPupil","sub_path":"custom_2d.py","file_name":"custom_2d.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
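The detector above wraps each large contour in `cv2.minEnclosingCircle`, which over-estimates elongated pupils and keeps whichever contour came last. A sketch of the same pipeline using `cv2.fitEllipse`, which maps more directly onto the `'ellipse'` fields of the result dict and keeps the largest candidate; the Canny and area thresholds are copied from above, the rest is illustrative:

```python
import cv2

def fit_pupil_ellipse(frame_gray, min_area=100):
    """Fit a true ellipse to the largest plausible contour. Sketch only."""
    blurred = cv2.GaussianBlur(frame_gray, (7, 7), 0)
    edges = cv2.Canny(blurred, 100, 200)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    best = None
    for contour in contours:
        # fitEllipse needs at least 5 points; filter specks by area as above.
        if len(contour) >= 5 and cv2.contourArea(contour) > min_area:
            (cx, cy), axes, angle = cv2.fitEllipse(contour)
            major = max(axes)
            if best is None or major > best['diameter']:
                best = {'ellipse': {'center': (cx, cy), 'axes': axes,
                                    'angle': angle},
                        'diameter': major,   # major axis as pupil diameter
                        'location': (cx, cy),
                        'confidence': 1.0}
    return best
```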
+{"seq_id":"16300546167","text":"\n# coding: utf-8\n\n# In[1]:\n\n# Since the goal of the push is not specified in the test,\n# I assume that Freshr push notifications aim at engaging\n# users who are not currently active in the app\n# maybe through recommendations\n\n# Here I am getting for each user the day(s)\n# of the week when they are not active in the app\n# I also get for each of them the time slots when they are the most active \n# because push notifications must be sent at appropriate time,\n# when they are less likely to interrupt the user\n\n# In[2]:\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport sys\n\n# In[3]:\n\n# Hourly time slots in a day\ntime_slots = [(i,i+1) for i in range(24)]\nweekdays = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}\n\n# Convert milliseconds timestamp to string formatted datetime\ndef ms_to_datetime(timestamp):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp/1000.0))\n\n# Convert string formatted datetime to datetime object\ndef str_to_datetime(string):\n return datetime.strptime(string, '%Y-%m-%d %H:%M:%S')\n\n# Get the time slot from a string formatted datetime\ndef get_time_slot(string):\n hour = str_to_datetime(string).hour\n return time_slots[hour]\n\n# Get the weekday from a string formatted datetime\ndef get_weekday(string):\n dt_obj = str_to_datetime(string)\n return dt_obj.weekday()\n\n# Read CSV dataset into a dataframe, clean NaN and duplicates\n# and add columns that convert milliseconds timestamps to string formatted datetime\ndef csv_to_clean_df(dataset_path):\n df = pd.read_csv(dataset_path)\n df.dropna(inplace=True)\n df.drop_duplicates(inplace=True)\n ms_cols = ['watermark', 'timestamp']\n df[ms_cols] = df[ms_cols].applymap(ms_to_datetime)\n df['time_slot'] = df['timestamp'].apply(get_time_slot)\n df['weekday'] = df['timestamp'].apply(get_weekday)\n #df.head()\n return df\n\n# For each user get the days when he is inactive in the app\ndef get_inactive_weekdays(per_user_active_weekdays):\n weekdays_index = [key for key in weekdays]\n per_user_active_weekdays['inactive_weekdays'] = per_user_active_weekdays['active_weekdays']\n for i, user_id in enumerate(per_user_active_weekdays['user_id']):\n per_user_active_weekdays.set_value(i, 'inactive_weekdays',\n np.setdiff1d(weekdays_index,\n per_user_active_weekdays.ix[i, 'active_weekdays']).tolist())\n return per_user_active_weekdays\n\n# In[4]:\n\ndef main(argv):\n start = datetime.now()\n try:\n df = csv_to_clean_df(argv[0])\n except:\n print('File path argument error')\n sys.exit(1)\n # Get number of times each user opened the conversation for each 1h-window/slot in a day\n per_user_time_slot_count = df.groupby(['user_id', 'time_slot']).time_slot.count().reset_index(name=\"count\")\n # Get number of times each user opened the conversation for each weekday\n per_user_weekday_count = df.groupby(['user_id', 'weekday']).weekday.count().reset_index(name=\"count\")\n # Get the weekdays when the user is active in the app\n per_user_active_weekdays = per_user_weekday_count.groupby(['user_id'])['weekday']\\\n .apply(lambda x: list(x))\\\n .reset_index(name='active_weekdays')\n # Get also the weekdays when the user is not active in the app\n per_user_active_weekdays = get_inactive_weekdays(per_user_active_weekdays)\n #per_user_active_weekdays.head()\n\n # Get for each user the time slots in a day when he uses the most the application\n per_user_time_slots_max_count = 
per_user_time_slot_count.groupby(['user_id'])['count'].max().reset_index()\n per_user_most_active_time_slots = pd.merge(per_user_time_slot_count,\n per_user_time_slots_max_count,\n on=['user_id', 'count'])\n per_user_most_active_time_slots = per_user_most_active_time_slots.groupby(['user_id'])['time_slot']\\\n .apply(lambda x: list(x)) \\\n .reset_index(name=\"time_slots\")\n\n # Merge the results, make them readable and write the final dataframe to a CSV file\n per_user_best_time = pd.merge(per_user_active_weekdays,\n per_user_most_active_time_slots,\n on='user_id').drop('active_weekdays', axis=1)\n per_user_best_time['inactive_weekdays'] = per_user_best_time['inactive_weekdays'].apply(lambda x: [weekdays[idx] for idx in x])\n per_user_best_time['time_slots'] = per_user_best_time['time_slots'].apply(lambda slots: [\"between {0}h and {1}h\".format(slot[0], slot[1]) for slot in slots])\n per_user_best_time.to_csv(argv[1])\n print(datetime.now() - start)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print('Too few arguments')\n sys.exit(1)\n elif len(sys.argv) > 3:\n print('Too many arguments')\n sys.exit(1)\n main(sys.argv[1:])\n\n\n","repo_name":"Rigzzzz/Freshr","sub_path":"freshr_predict_push_time.py","file_name":"freshr_predict_push_time.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
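On current pandas, the merge-against-max pattern above can be collapsed with `idxmax`; note this keeps a single busiest slot per user on ties, whereas the merge keeps all tied slots. A toy sketch:

```python
import pandas as pd

# Toy frame standing in for the cleaned df above (hour = start of time slot).
df = pd.DataFrame({'user_id': [1, 1, 1, 2, 2],
                   'hour':    [9, 9, 18, 7, 7]})

# Count events per (user, hour), then keep each user's busiest hour.
counts = df.groupby(['user_id', 'hour']).size().reset_index(name='count')
busiest = counts.loc[counts.groupby('user_id')['count'].idxmax()]
print(busiest)   # user 1 -> hour 9, user 2 -> hour 7
```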
+{"seq_id":"17776785954","text":"import os\n# source: https://sabuhish.github.io/fastapi-mail/example/\n\nfrom fastapi_mail import FastMail, MessageSchema, ConnectionConfig\nfrom starlette.responses import JSONResponse\n#from starlette.requests import Request\nfrom fastapi_mail import FastMail, MessageSchema, ConnectionConfig\nfrom pydantic import EmailStr\nfrom typing import List\nfrom utils import get_env_variable\n#from fastapi_mail.email_utils import DefaultChecker\n\ndef mail_service_conf():\n return ConnectionConfig(\n MAIL_USERNAME = get_env_variable(\"MAIL_USERNAME\"),\n MAIL_PASSWORD = get_env_variable(\"MAIL_PASSWORD\"),\n MAIL_FROM = get_env_variable(\"MAIL_USERNAME\"),\n MAIL_PORT = 587,\n MAIL_SERVER = \"smtp.gmail.com\",\n MAIL_FROM_NAME=\"Packer Solver\",\n MAIL_TLS = True,\n MAIL_SSL = False,\n USE_CREDENTIALS = True,\n VALIDATE_CERTS = True\n )\n\nasync def send_test_email(recipients: List[EmailStr]) -> JSONResponse:\n try:\n message = MessageSchema(subject=\"fastapi_mail Test\", recipients=recipients, body=\"Hello!\")\n fm = FastMail(mail_service_conf())\n await fm.send_message(message)\n return JSONResponse(status_code=200, content={\"message\": \"email has been sent\"})\n except Exception as ex:\n return JSONResponse(status_code=400, content={\"message\": f\"{ex}\"})\n\nasync def send_to_one(recipient: EmailStr, subject: str, body: str) -> JSONResponse:\n try:\n message = MessageSchema(\n subject = subject,\n recipients = [recipient],\n body = body\n )\n\n fm = FastMail(mail_service_conf())\n await fm.send_message(message)\n return JSONResponse(status_code=200, content={\"message\": f\"email has been sent to {recipient}\"})\n\n except Exception as ex:\n return JSONResponse(status_code=400, content={\"message\": f\"{ex}\"})\n","repo_name":"urmaspitsi/PackerUserManagement","sub_path":"emailer.py","file_name":"emailer.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23569445401","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\ndef readint(): return int(input())\r\ndef readfloat(): return float(input())\r\ndef readarray(N, foo):\r\n res = []\r\n for _ in xrange(N):\r\n res.append(foo())\r\n return res\r\ndef readlinearray(foo): return map(foo, input().split())\r\n\r\nN1 = 0\r\nN2 = 1\r\nINDEX = 2\r\n\r\ndef countLeft(pos, stalls):\r\n count = 0\r\n \r\n for i in range(pos-1, -1, -1):\r\n if stalls[i] == 'O': break\r\n count += 1\r\n \r\n return count\r\n\r\ndef countRight(pos, stalls):\r\n count = 0\r\n \r\n for i in range(pos+1, len(stalls)):\r\n if stalls[i] == 'O': break\r\n count += 1\r\n \r\n return count\r\n \r\ndef computeLsRs(stalls):\r\n ll = []\r\n for i in range(len(stalls)):\r\n if stalls[i] == 'O': continue\r\n ls = countLeft(i, stalls)\r\n rs = countRight(i, stalls)\r\n ll.append((min(ls, rs), max(ls, rs), len(stalls) -1 - i))\r\n return ll\r\n\r\ndef solve(N, K):\r\n \r\n N += 2\r\n \r\n stalls = N * ['.']\r\n stalls[0] = 'O'\r\n stalls[-1] = 'O'\r\n \r\n chosen = None\r\n \r\n #print(str(N) + ' ' + str(K))\r\n #print(stalls)\r\n for i in range(K):\r\n lsrs = computeLsRs(stalls)\r\n #print(lsrs)\r\n chosen = max(lsrs)\r\n i = N -1 - chosen[INDEX]\r\n stalls[i] = 'O'\r\n # y is max(LS, RS), and z is min(LS, RS) \r\n y = chosen[N2]\r\n z = chosen[N1]\r\n #print(stalls)\r\n #print(str(y) + ' ' + str(z))\r\n \r\n #lsrs = computeLsRs(stalls)\r\n #chosen = max(lsrs)\r\n \r\n return y, z\r\n\r\ndef solve1(N, K):\r\n \r\n ll = [N]\r\n for i in range(K):\r\n # v // 2 + v % 2, v // 2\r\n v = ll.pop(0) - 1\r\n y, z = v // 2 + v % 2, v // 2\r\n ll.append(y)\r\n ll.append(z)\r\n ll = sorted(ll, reverse=True)\r\n \r\n #if ll != sorted(ll, reverse=True): print('ops')\r\n \r\n return y, z\r\nif __name__ == '__main__':\r\n\r\n T = readint()\r\n for t in range(1, T+1):\r\n N, K = readlinearray(int)\r\n y, z = solve1(N, K)\r\n print('Case #%d: %d %d' % (t, y, z))\r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2140.py","file_name":"2140.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23384160891","text":"# Javier Fernandez Google Code Jam 2013\n# Google Code Jam 2013\n# javierfdr@gmail.com - javierfdr\n# Tic Tac Toe Tomek\n\nimport sys\n\n# return 10 poosible character strikes\ndef process_board(ia):\n\tstrikes = [[],[],[],[],[],[],[],[],[],[]]\n\tstrikes[0] = ia[0]\n\tstrikes[1] = ia[1]\n\tstrikes[2] = ia[2]\n\tstrikes[3] = ia[3]\n\tstrikes[4] = [ia[0][0],ia[1][0],ia[2][0],ia[3][0]]\n\tstrikes[5] = [ia[0][1],ia[1][1],ia[2][1],ia[3][1]]\n\tstrikes[6] = [ia[0][2],ia[1][2],ia[2][2],ia[3][2]]\n\tstrikes[7] = [ia[0][3],ia[1][3],ia[2][3],ia[3][3]]\n\tstrikes[8] = [ia[0][0],ia[1][1],ia[2][2],ia[3][3]]\n\tstrikes[9] = [ia[3][0],ia[2][1],ia[1][2],ia[0][3]]\n\n\treturn strikes\n\n# create 10 possible winning strikes from 4 input array\ndef numerize_board(ia):\n\tnum_strikes = []\n\tfor row in ia:\n\t\tnum_strikes.append(numerize_input(row))\n\n\treturn num_strikes\n\n# transform from characters to input. X-1, O-1, T-0\ndef numerize_input(a):\n\tb = []\n\tfor i in a:\n\t\tif i=='T':\n\t\t\tb.append(0)\n\t\telif i=='O':\n\t\t\tb.append(1)\n\t\telif i=='X':\n\t\t\tb.append(-1)\n\treturn b\n\n# if row is filled with X or O or there is a T then is winning\ndef is_winning_row(char_row, num_row):\n\ts = sum(num_row)\n\tif s==3 or s==-3:\n\t\tif 'T' in char_row:\n\t\t\treturn s\n\tif s==4 or s==-4:\n\t\treturn s\n\t\n\tif len(num_row)==4:\n\t\treturn 10 # for filled row\n\telse:\n\t\treturn 0 #for non-finished\n\ndef solve_tic_tac(ia):\n\tchar_strikes = process_board(ia)\n\tnum_strikes = numerize_board(char_strikes)\n\n\tfull_rows = 0\n\n\tfor r in range(10):\n\n\t\tw = is_winning_row(char_strikes[r],num_strikes[r])\n\t\tif w==3 or w==4:\n\t\t\treturn 'O won'\n\t\telif w==-3 or w==-4:\n\t\t\treturn 'X won'\n\t\telif w==10:\n\t\t\tfull_rows = full_rows+1\n\n\tif full_rows == 10:\n\t\treturn 'Draw'\n\telse:\n\t\treturn 'Game has not completed'\n\t\t\n\n\nout_file = open('output.out','w+')\nin_file = sys.stdin\nnum_cases = int(in_file.readline())\nfor c in range(1,num_cases+1):\n\tcase = 'Case #'+str(c)+': '\n\tr = []\n\tr.append(list(in_file.readline().strip('\\n')))\n\tr.append(list(in_file.readline().strip('\\n')))\n\tr.append(list(in_file.readline().strip('\\n')))\n\tr.append(list(in_file.readline().strip('\\n')))\n\tin_file.readline().strip('\\n').split()\n\n\ts = solve_tic_tac(r)\n\n\tresult = case+s+'\\n'\n\tout_file.write(result)\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/475.py","file_name":"475.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"264323615","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#export OPENAI_API_KEY=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\n\nimport re\nimport requests\nfrom bs4 import BeautifulSoup # type: ignore\n\n# write a function that can extract text from a list of URLs and returns a text documents per URL\ndef get_documents(urls: list) -> dict[str, str]:\n \"\"\"Extract text from a list of URLs and returns a text documents per URL\n\n Args:\n urls (list): list of URLs\n\n Returns:\n list: list of text documents\n \"\"\"\n documents = {}\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',\n 'Accept-Language': 'en-US,en;q=0.9',\n # Add more headers if necessary\n }\n for url in urls:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n # Extract the text content from the response\n # soup = BeautifulSoup(response.content, 'html.parser')\n text = extract_text_with_links(response.content)\n # text = soup.get_text()\n # text = re.sub(r'\\n{2,}', '\\n', text)\n documents[url] = text\n elif response.status_code == 403:\n print(f\"403 Forbidden: Access to the webpage is restricted. {response.reason}\")\n else:\n print(f\"Error: {response.status_code} - Unable to access the webpage.\")\n return documents\n\n\ndef extract_text_with_links(html):\n # Create a BeautifulSoup object\n soup = BeautifulSoup(html, 'html.parser')\n\n # Remove unwanted tags\n unwanted_tags = ['script', 'style']\n for tag in soup.find_all(unwanted_tags):\n tag.decompose()\n\n # Process specific tags\n for a in soup.find_all('a'):\n text = a.get_text(strip=True)\n href = a.get('href')\n if text and href:\n a.string = f'{text} ({href})'\n\n for br in soup.find_all('br'):\n br.insert_after('\\n')\n\n for p in soup.find_all('p'):\n p.insert_after('\\n\\n')\n for child in p.find_all(recursive=False):\n if child.name != 'br':\n child.insert_before(' ')\n\n for heading in soup.find_all(re.compile('^h[1-6]$')):\n heading.insert_after('\\n\\n')\n\n # Extract text\n text = soup.get_text(separator=' ')\n\n text = re.sub(r'\\n+', '\\n', text.strip())\n text = re.sub(r'\\n\\s+', '\\n', text.strip())\n # text = re.sub(r'\\s+', ' ', text)\n\n return text\n","repo_name":"itissid/Drop-PoT","sub_path":"src/drop_backend/utils/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72734063235","text":"from argparse import ArgumentParser\nimport sys\nimport logging\nimport pkg_resources\nimport csv\nfrom quicksect import IntervalTree\nimport networkx as nx\nfrom pathlib import Path\nfrom copy import copy\nimport re\n\n\nEXIT_FILE_IO_ERROR = 1\nEXIT_COMMAND_LINE_ERROR = 2\nEXIT_TSV_FILE_ERROR = 3\nDEFAULT_VERBOSE = False\nDEFAULT_OVERLAP = 0.75 \nPROGRAM_NAME = \"cnvmerge\"\n\n\ntry:\n PROGRAM_VERSION = pkg_resources.require(PROGRAM_NAME)[0].version\nexcept pkg_resources.DistributionNotFound:\n PROGRAM_VERSION = \"undefined_version\"\n\n\ndef exit_with_error(message, exit_status):\n '''Print an error message to stderr, prefixed by the program name and 'ERROR'.\n Then exit program with supplied exit status.\n\n Arguments:\n message: an error message as a string.\n exit_status: a positive integer representing the exit status of the\n program.\n '''\n logging.error(message)\n print(\"{} ERROR: {}, exiting\".format(PROGRAM_NAME, message), file=sys.stderr)\n sys.exit(exit_status)\n\n\ndef parse_args():\n '''Parse command line arguments.\n Returns Options object with command line argument values as attributes.\n Will exit the program on a command line error.\n '''\n description = 'Merge distilled SVs'\n parser = ArgumentParser(description=description)\n parser.add_argument('--version',\n action='version',\n version='%(prog)s ' + PROGRAM_VERSION)\n parser.add_argument('--log',\n metavar='LOG_FILE',\n type=str,\n help='record program progress in LOG_FILE')\n parser.add_argument('--overlap',\n metavar='PERCENTAGE',\n default=DEFAULT_OVERLAP,\n type=float,\n help='percentage overlap for CNV equality (default {})'.format(DEFAULT_OVERLAP))\n parser.add_argument('tsv_files',\n nargs='*',\n metavar='TSV_FILE',\n type=str,\n help='Input TSV files')\n return parser.parse_args()\n\n\nclass CNVIntervals(object):\n def __init__(self):\n self.chroms = {}\n\n def add(self, chrom, start, end, val):\n if chrom not in self.chroms:\n self.chroms[chrom] = IntervalTree()\n tree = self.chroms[chrom]\n tree.add(start, end, val)\n\n def lookup(self, chrom, start, end):\n if chrom in self.chroms:\n return self.chroms[chrom].search(start, end)\n else:\n return [] \n\n\n# mapping from unique integer (count) to variant record\nclass Variants(object):\n def __init__(self):\n self.variants = {}\n self.count = 0\n\n def add(self, variant):\n self.variants[self.count] = variant\n self.count += 1\n\ndef get_sample_name(filepath):\n fields = filepath.split('.')\n if len(fields) > 0:\n sample = fields[0]\n else:\n sample = filepath\n return sample\n\ndef read_tsv_files(options):\n sample_ids = set()\n variants = Variants()\n for tsv_filename in options.tsv_files:\n logging.info(\"Processing TSV file from %s...\", tsv_filename)\n sample = get_sample_name(tsv_filename)\n sample_ids.add(sample)\n with open(tsv_filename) as file:\n reader = csv.DictReader(file, delimiter=\"\\t\")\n for row in reader:\n row['sample'] = sample\n variants.add(row)\n logging.info(\"Processing TSV file from %s: done\", tsv_filename)\n return sample_ids, variants\n\n\ndef cnv_intervals(variants):\n logging.info(\"Computing %i CNV intervals\", len(variants))\n intervals = CNVIntervals()\n for idx, (variant_id, variant_info) in enumerate(variants.items()):\n chrom = variant_info['chr']\n start = int(float(variant_info['start']))\n end = int(float(variant_info['end']))\n intervals.add(chrom, start, end, variant_id)\n if (idx + 1) % 100000 == 0:\n logging.info('Computing %i CNV intervals: %i done', len(variants), idx + 1)\n 
logging.info(\"Computing %i CNV intervals, done\", len(variants))\n return intervals\n\n\ndef is_overlap(start1, end1, start2, end2, min_overlap):\n overlap_start = max(start1, start2)\n overlap_end = min(end1, end2)\n if overlap_start < overlap_end:\n overlap_size = float((overlap_end - overlap_start) + 1)\n cnv1_size = (end1 - start1) + 1\n cnv2_size = (end2 - start2) + 1\n cnv1_overlap = overlap_size / cnv1_size\n cnv2_overlap = overlap_size / cnv2_size\n return cnv1_overlap >= min_overlap and cnv2_overlap >= min_overlap\n return False\n\ndef get_intersections(overlap, variants, intervals):\n logging.info(\"Computing %i CNV intersections...\", len(variants))\n overlaps = nx.Graph() \n for idx, (variant_id, variant_info) in enumerate(variants.items()):\n # make sure all variants are recorded in the graph\n overlaps.add_node(variant_id)\n chrom = variant_info['chr']\n start = int(float(variant_info['start']))\n end = int(float(variant_info['end']))\n this_state = variant_info['state'] \n intersections = { i for i in intervals.lookup(chrom, start, end) }\n for other_variant in intersections:\n other_variant_id = other_variant.data\n other_variant_info = variants[other_variant_id]\n # don't add self edges\n if variant_id != other_variant_id and \\\n is_overlap(start, end, other_variant.start, other_variant.end, overlap) and \\\n this_state == other_variant_info['state']:\n overlaps.add_edge(variant_id, other_variant_id)\n if (idx + 1) % 100000 == 0:\n logging.info(\"Computing %i variant intersections: %i done\", len(variants), idx + 1)\n logging.info(\"Computing %i variant intersections: done\", len(variants))\n return overlaps\n\n\ndef list_median(items):\n mid_pos = len(items) // 2\n return sorted(items)[mid_pos] \n\ndef average(items):\n return (sum(items) / len(items))\n\ndef build_evidence(variants, samples):\n # evidence: mapping, sample -> set(caller)\n num_positive_samples = 0\n evidence = set()\n for var in variants:\n this_sample = var['sample']\n evidence.add(this_sample)\n results = []\n for sample in samples:\n if sample in evidence:\n num_positive_samples += 1\n results.append(1)\n else:\n results.append(0)\n return num_positive_samples, results\n\n\ndef merge_overlaps(sample_ids, variants, overlaps):\n logging.info(\"Merging overlapping variants...\")\n writer = csv.writer(sys.stdout, delimiter=\"\\t\")\n sorted_samples = sorted(sample_ids)\n header = [\"chr\", \"start\", \"end\", \"state\", \"median\", \"num pos samples\"] + sorted_samples \n writer.writerow(header)\n for component in nx.connected_components(overlaps):\n if len(component) > 0:\n variant_infos = [variants[id] for id in component]\n first_info = variant_infos[0]\n chrom = first_info['chr']\n state = first_info['state']\n start = min([int(float(info['start'])) for info in variant_infos])\n end = max([int(float(info['end'])) for info in variant_infos])\n avg_median = average([float(info['median']) for info in variant_infos])\n num_positive_samples, evidence = build_evidence(variant_infos, sorted_samples)\n writer.writerow([chrom, start, end, state, avg_median, num_positive_samples] + evidence) \n logging.info(\"Merging overlapping variants: done\")\n\n\ndef init_logging(log_filename):\n '''If the log_filename is defined, then\n initialise the logging facility, and write log statement\n indicating the program has started, and also write out the\n command line from sys.argv\n\n Arguments:\n log_filename: either None, if logging is not required, or the\n string name of the log file to write to\n Result:\n None\n 
'''\n if log_filename is None:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s - %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S')\n else:\n logging.basicConfig(filename=log_filename,\n level=logging.DEBUG,\n filemode='w',\n format='%(asctime)s %(levelname)s - %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S')\n logging.info('computation started')\n logging.info('command line: %s', ' '.join(sys.argv))\n\n\ndef main():\n \"Orchestrate the execution of the program\"\n options = parse_args()\n init_logging(options.log)\n sample_ids, variants = read_tsv_files(options)\n intervals = cnv_intervals(variants.variants)\n overlaps = get_intersections(options.overlap, variants.variants, intervals)\n merge_overlaps(sample_ids, variants.variants, overlaps)\n logging.info(\"computation ended\")\n\n\n# If this script is run from the command line then call the main function.\nif __name__ == '__main__':\n main()\n","repo_name":"bjpop/svdistil","sub_path":"svdistil/cnvmerge.py","file_name":"cnvmerge.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
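`is_overlap` implements a reciprocal-overlap test: the shared interval must cover at least `min_overlap` of *both* CNVs, not just one of them. A quick standalone check of that behaviour:

```python
# Reciprocal-overlap semantics of cnvmerge.is_overlap, exercised standalone.
assert is_overlap(100, 200, 120, 210, min_overlap=0.75)      # ~80%/89% reciprocal
assert not is_overlap(100, 200, 180, 400, min_overlap=0.75)  # one-sided overlap only
assert not is_overlap(100, 200, 300, 400, min_overlap=0.75)  # disjoint intervals
```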
+{"seq_id":"15077274052","text":"#!/usr/bin/python3\n\nfrom netaddr import IPNetwork, iter_iprange, glob_to_iprange\nimport re\nimport sys\nimport os.path\nimport ipaddress\n\nclass IPParser:\n def __init__(self, filename):\n self.filename = filename\n\n def is_valid_ip_address(self, ip):\n try:\n ipaddress.ip_address(ip)\n return True\n except ValueError:\n return print(\"Not a valid IP address\")\n\n def process(self, ip_value):\n if re.search('-', ip_value):\n self.dash2(ip_value)\n self.withdash(ip_value)\n elif re.search('/', ip_value):\n for ip in IPNetwork(ip_value).iter_hosts():\n print(ip)\n elif re.search(r'^\\s*$', ip_value):\n pass\n elif re.search('\\*+', ip_value):\n for ip in glob_to_iprange(ip_value):\n print(ip)\n elif len(ip_value) == 0:\n pass\n else:\n if self.is_valid_ip_address(ip_value):\n print(ip_value.lstrip().rstrip())\n\n def withdash(self, ip_value):\n try:\n split_ip = ip_value.split('-')\n firstip, secondip = split_ip[0].lstrip().rstrip(), split_ip[1].lstrip().rstrip()\n listfirstip, listsecondip = list(firstip.split('.')), list(secondip.split('.'))\n start, end = int(listfirstip[3]), int(listsecondip[3])\n first_3_octets = listfirstip[0:3]\n first_3 = \".\".join(str(dot) for dot in first_3_octets)\n\n while start <= end:\n print(first_3 + \".\" + str(start))\n start = start + 1\n except IndexError:\n pass\n\n def dash2(self, ip):\n try:\n pull_ip = ip.split('-')\n if len(pull_ip[1]) <= 3:\n firstip, secondip = pull_ip[0].lstrip().rstrip(), pull_ip[1].lstrip().rstrip()\n listfirstip, listsecondip = list(firstip.split('.')), list(secondip.split('.'))\n start, end = int(listfirstip[3]), int(listsecondip[0])\n first_3_octets = listfirstip[0:3]\n first_3 = \".\".join(str(dot) for dot in first_3_octets)\n\n while start <= end:\n print(first_3 + \".\" + str(start))\n start = start + 1\n\n except IndexError:\n pass\n\n def usage(self):\n print(\"\\n\" + \"Example Usage is: ./ipparser.py \\\"nameoffile.txt\\\"\" + \"\\n\")\n\n def main(self):\n try:\n if not os.path.isfile(self.filename):\n print(\"File not Found\")\n\n with open(self.filename, 'r') as ip_option:\n newlist = [line.strip() for line in ip_option]\n\n for ip in newlist:\n if re.search(',', ip):\n split_ip = ip.split(',')\n for octet in split_ip:\n self.process(octet)\n elif re.search(';', ip):\n split_ip = ip.split(';')\n for octet in split_ip:\n self.process(octet)\n else:\n self.process(ip)\n\n except:\n print(\"You must supply a file to parse\")\n self.usage()\n\n sys.exit(2)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: ./ipparser.py \")\n else:\n ip_parser = IPParser(sys.argv[1])\n ip_parser.main()\n","repo_name":"bmethvien/Penetration-Testing-Tools","sub_path":"ipparser.py","file_name":"ipparser.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34134124442","text":"import glob\nfrom tqdm import tqdm\nimport os\nimport fire\n\nfrom Rignak_Misc.path import create_path\n\n# python divide_dataset.py E:\\datasets\\waifu2latent\n\ndef divide_dataset(folder, train_to_val_ratio=10):\n labels = [os.path.join(folder, subfolder) for subfolder in os.listdir(folder) if os.path.isdir(os.path.join(folder, subfolder))]\n for label in tqdm(labels):\n train_folder = os.path.join(folder, 'train', os.path.split(label)[-1])\n val_folder = os.path.join(folder, 'val', os.path.split(label)[-1])\n create_path(train_folder)\n create_path(val_folder)\n for i, filename in tqdm(enumerate(glob.glob(os.path.join(label, '*.png')))):\n new_filename = os.path.join(train_folder, os.path.split(filename)[-1]) if i % train_to_val_ratio else os.path.join(val_folder, os.path.split(filename)[-1])\n os.rename(filename, new_filename)\n\n\nif __name__ == '__main__':\n fire.Fire(divide_dataset)\n","repo_name":"AurelienColin/ImageProcessing","sub_path":"divide_dataset.py","file_name":"divide_dataset.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4169652233","text":"import logging\nimport theano\nfrom theano.gradient import disconnected_grad\nfrom theano import tensor\n\nfrom blocks.graph import ComputationGraph\nfrom blocks.filter import VariableFilter\nfrom blocks.bricks import Linear, NDimensionalSoftmax\nfrom blocks.bricks.base import application\nfrom blocks.roles import OUTPUT, add_role, WEIGHT\nfrom blocks.utils import dict_subset, shared_floatx_nans\nfrom blocks_extras.bricks.sequence_generator2 import SoftmaxReadout, MergeReadout\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReinforceReadout(SoftmaxReadout):\n\n def __init__(self, reward_brick, entropy=None, **kwargs):\n super(ReinforceReadout, self).__init__(**kwargs)\n self.reward_brick = reward_brick\n self.entropy_coof = entropy\n\n self.value_prediction = Linear(output_dim=1, name='value_prediction')\n\n self.children += [\n reward_brick, self.value_prediction]\n\n self.costs.inputs += ['attended', 'attended_mask']\n\n def _push_allocation_config(self):\n super(ReinforceReadout, self)._push_allocation_config()\n self.value_prediction.input_dim = self.get_dim('states')\n\n @application\n def costs(self, application_call, prediction, prediction_mask,\n groundtruth, groundtruth_mask,\n **inputs):\n states = disconnected_grad(inputs['states'])\n\n merged = self.merge(**dict_subset(inputs, self.merge_names))\n # Compute log-probabilities for the predicted tokens\n log_probs = -self.all_scores(prediction, merged) * prediction_mask\n # Compute per-token rewards\n rewards = self.reward_brick.apply(prediction, prediction_mask,\n groundtruth, groundtruth_mask).sum(axis=-1)\n # Encourage entropy by adding negated log-probs to the rewards\n application_call.add_auxiliary_variable(log_probs, name='log_probs')\n if self.entropy_coof:\n rewards += self.entropy_coof * disconnected_grad(-log_probs)\n\n future_rewards = rewards[::-1].cumsum(axis=0)[::-1]\n\n baselines = self.value_prediction.apply(states)[:, :, 0]\n application_call.add_auxiliary_variable(\n baselines, name='baselines')\n # Compute baseline error\n centered_future_rewards = future_rewards - baselines\n baseline_errors = (\n (centered_future_rewards *\n disconnected_grad(prediction_mask)) ** 2).sum(axis=0)\n application_call.add_auxiliary_variable(\n baseline_errors, name='baseline_errors')\n\n # The gradient of this will be the REINFORCE 1-sample\n # gradient estimate\n costs = (disconnected_grad(centered_future_rewards)\n * log_probs\n * prediction_mask).sum(axis=0)\n\n # Add auxiliary variables for intermediate steps of the computation\n application_call.add_auxiliary_variable(\n rewards, name='rewards')\n application_call.add_auxiliary_variable(\n log_probs.copy(), name='prediction_log_probs')\n\n return costs\n\n\nclass CriticReadout(MergeReadout):\n\n def __init__(self, num_tokens,\n value_softmax=False, same_value_for_wrong=False,\n groundtruth_word_bonus=False, dueling_outputs=False, **kwargs):\n self.value_softmax = value_softmax\n self.same_value_for_wrong = same_value_for_wrong\n self.groundtruth_word_bonus = groundtruth_word_bonus\n self.dueling_outputs = dueling_outputs\n super(CriticReadout, self).__init__(post_merge_dim=num_tokens, **kwargs)\n self.costs.inputs = ([\n 'prediction', 'prediction_mask',\n 'groundtruth', 'groundtruth_mask']\n + self.input_names)\n\n def _allocate(self):\n w = shared_floatx_nans((self.get_dim('states'),), name='add_weights')\n add_role(w, WEIGHT)\n self.parameters.append(w)\n\n def _initialize(self):\n self.weights_init.initialize(self.parameters[0], 
self.rng)\n\n # For compatibility with Blocks-extras\n def sample(self):\n raise NotImplementedError()\n\n # For compatibility with Blocks-extras\n def scores(self):\n pass\n\n @application\n def costs(self, prediction, prediction_mask,\n groundtruth, groundtruth_mask, **inputs):\n outputs = self.all_outputs(groundtruth, groundtruth_mask, **inputs)\n # It does not matter what we return here, as long as it contains\n # the values in the computation graph.\n return outputs.sum()\n\n @application\n def all_outputs(self, application_call, groundtruth, groundtruth_mask, **inputs):\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n indices = tensor.repeat(\n tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])\n if self.value_softmax:\n logger.debug('Applying value softmax')\n outputs = (tensor.addbroadcast(outputs[:, :, :1], 2)\n + self.softmax.apply(outputs[:, :, 1:], extra_ndim=1))\n if self.same_value_for_wrong:\n logger.debug('Same value for apriori wrong actions')\n wrong_output = outputs[:, :, 0]\n outputs = outputs[:, :, 1:]\n wrong_mask = tensor.ones_like(outputs[0])\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n outputs = (outputs * (1 - wrong_mask)\n + wrong_output[:, :, None] * wrong_mask)\n application_call.add_auxiliary_variable(wrong_mask, name='wrong_mask')\n if self.groundtruth_word_bonus:\n logger.debug('Bonus for grondtruth words')\n wrong_mask = tensor.ones_like(outputs[0])\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n w, = self.parameters\n bonuses = inputs['states'].dot(w)\n outputs += bonuses[:, :, None] * (1 - wrong_mask)[None, :, :]\n if self.dueling_outputs:\n logger.debug('Dueling outputs a-la dueling networks')\n base_output = outputs[:, :, [0]]\n dueling_outputs = outputs[:, :, 1:]\n outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=2, keepdims=True)\n return outputs\n\n @application\n def outputs(self, groundtruth, groundtruth_mask, **inputs):\n # Copy-pasted from all_outputs, because Theano does not support ellipsis\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n indices = tensor.repeat(\n tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])\n if self.value_softmax:\n logger.debug('Applying value softmax')\n outputs = (tensor.addbroadcast(outputs[:, :1], 1)\n + self.softmax.apply(outputs[:, 1:]))\n if self.same_value_for_wrong:\n logger.debug('Same value for apriori wrong actions')\n wrong_output = outputs[:, 0]\n outputs = outputs[:, 1:]\n wrong_mask = tensor.ones_like(outputs)\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n outputs = (outputs * (1 - wrong_mask)\n + wrong_output[:, None] * wrong_mask)\n if self.groundtruth_word_bonus:\n logger.debug('Bonus for grondtruth words')\n wrong_mask = tensor.ones_like(outputs)\n wrong_mask = tensor.set_subtensor(\n wrong_mask[indices, groundtruth.T.flatten()], 0)\n w, = self.parameters\n bonuses = inputs['states'].dot(w)\n outputs = outputs + bonuses[:, None] * (1 - wrong_mask)\n if self.dueling_outputs:\n logger.debug('Dueling outputs a-la dueling networks')\n base_output = outputs[:, [0]]\n dueling_outputs = outputs[:, 1:]\n outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=1, keepdims=True)\n return outputs\n\n\nclass ActorCriticReadout(SoftmaxReadout):\n \"\"\"Actor-critic\n\n Params\n ------\n bos_token : int\n The token used to pad critic input. 
Critic needs to do\n at least one extra step compared to the actor in order\n to get the first glimpse of the ground-truth sequence\n before predicting the actual values.\n\n \"\"\"\n def __init__(self, reward_brick,\n compute_targets, solve_bellman,\n freeze_actor, freeze_critic, critic_uses_actor_states,\n critic_uses_groundtruth,\n critic=None, critic_burnin_steps=None,\n critic_loss=None,\n critic_policy_t=None,\n entropy_reward_coof=None, cross_entropy_reward_coof=None,\n trpo_coef=None,\n discount=None,\n value_penalty=None, value_penalty_type=None,\n accumulate_outputs=False, use_value_biases=None,\n actor_grad_estimate=None,\n bos_token=None,\n **kwargs):\n super(ActorCriticReadout, self).__init__(**kwargs)\n self.reward_brick = reward_brick\n self.critic = critic\n self.freeze_actor = freeze_actor\n self.freeze_critic = freeze_critic\n self.critic_uses_actor_states = critic_uses_actor_states\n self.critic_uses_groundtruth = (\n critic_uses_groundtruth if critic_uses_groundtruth is not None else True)\n self.critic_burnin_steps = (\n critic_burnin_steps if critic_burnin_steps is not None else 0)\n self.critic_loss = (\n critic_loss if critic_loss is not None else \"L2\")\n self.value_summand = Linear(output_dim=1, name='summand')\n self.softmax_t = 1.\n self.critic_policy_t = (\n critic_policy_t if critic_policy_t is not None else 1.0)\n self.epsilon = 0.\n self.discount = (\n discount if discount is not None else 1.)\n self.entropy_reward_coof = (\n entropy_reward_coof if entropy_reward_coof is not None else 0.)\n self.cross_entropy_reward_coof = (\n cross_entropy_reward_coof if cross_entropy_reward_coof is not None else 0.)\n self.trpo_coef = (\n trpo_coef if trpo_coef is not None else 0.)\n self.value_penalty = value_penalty\n self.value_penalty_type = (\n value_penalty_type if value_penalty_type is not None else \"L2\")\n self.compute_targets = compute_targets\n self.solve_bellman = solve_bellman\n self.accumulate_outputs = accumulate_outputs\n self.use_value_biases = (\n use_value_biases if use_value_biases is not None else True)\n self.actor_grad_estimate = (\n actor_grad_estimate if actor_grad_estimate else 'all_actions')\n self.bos_token = bos_token\n self.softmax = NDimensionalSoftmax()\n self.children += [reward_brick, self.value_summand, self.softmax]\n if self.critic:\n self.children.append(self.critic)\n self.costs.inputs += ['attended', 'attended_mask']\n\n def _push_allocation_config(self):\n super(ActorCriticReadout, self)._push_allocation_config()\n self.value_summand.input_dim = self.get_dim('attended')\n\n @application\n def scores(self, **inputs):\n merged = self.merge(**dict_subset(inputs, self.merge_names))\n return self.softmax.log_probabilities(\n merged * self.softmax_t, extra_ndim=merged.ndim - 2)\n\n @application\n def costs(self, application_call, prediction, prediction_mask,\n groundtruth, groundtruth_mask,\n **inputs):\n def _prediction_subtensor(data):\n if data.ndim != 3:\n raise ValueError\n flat_data = data.reshape((\n data.shape[0] * data.shape[1],\n data.shape[2]))\n flat_data = flat_data[\n tensor.arange(flat_data.shape[0]), prediction.flatten()]\n return flat_data.reshape((\n prediction.shape[0], prediction.shape[1]))\n\n attended = disconnected_grad(inputs.pop('attended'))\n attended_mask = disconnected_grad(inputs.pop('attended_mask'))\n\n # Compute the rewards\n rewards = self.reward_brick.apply(\n prediction, prediction_mask,\n groundtruth, groundtruth_mask)[:, :, 0]\n future_rewards = rewards[::-1].cumsum(axis=0)[::-1]\n\n # Compute the 
critic outputs\n if self.critic:\n padding = tensor.repeat(\n tensor.fill(prediction[0:1], self.bos_token), 1, axis=0)\n mask_padding = tensor.repeat(\n tensor.fill(prediction_mask[0:1], 1.), 1, axis=0)\n padded_prediction = tensor.concatenate([padding, prediction])\n padded_prediction_mask = tensor.concatenate([mask_padding, prediction_mask])\n if self.critic_uses_groundtruth:\n critic_context = groundtruth\n critic_context_mask = groundtruth_mask\n else:\n critic_context = tensor.zeros_like(groundtruth[0:1])\n critic_context_mask = tensor.zeros_like(groundtruth_mask[0:1])\n critic_kwargs = dict(\n prediction=padded_prediction, prediction_mask=padded_prediction_mask,\n groundtruth=critic_context, groundtruth_mask=critic_context_mask,\n inputs=critic_context, inputs_mask=critic_context_mask)\n\n if self.critic_uses_actor_states:\n extra_inputs = disconnected_grad(inputs['states'])\n # We don't need the very last hidden state of the actor\n # in extra_inputs. We have to add something instead for the shapes\n # to match. It doesn't matter at all, what exactly we add.\n critic_kwargs['extra_inputs'] = tensor.concatenate(\n [extra_inputs, tensor.zeros_like(extra_inputs[0:1])])\n critic_cg = ComputationGraph(self.critic.costs(**critic_kwargs))\n outputs, = VariableFilter(\n applications=[self.critic.generator.readout.all_outputs],\n roles=[OUTPUT])(critic_cg)\n # The first subtensor should be discarded, because it was outputted\n # for the padding. In addition to that Q-values from the first\n # 'critic_burnin_steps' will be ignored, see later in the code.\n outputs = outputs[1:]\n else:\n outputs = self.merge(**dict_subset(inputs, self.merge_names))\n prediction_outputs = _prediction_subtensor(outputs)\n\n # Compute Q adjustments\n adjustments = outputs\n prediction_adjustments = prediction_outputs\n if self.accumulate_outputs:\n prediction_adjustments = prediction_outputs.cumsum(axis=0)\n adjustments = tensor.inc_subtensor(\n adjustments[1:], prediction_adjustments[:-1][:, :, None])\n\n # Compute shared additive biases for all Q values\n if self.use_value_biases:\n value_biases = (\n self.value_summand.apply(attended)[:, :, 0]\n * attended_mask).sum(axis=0)\n else:\n value_biases = tensor.zeros_like(adjustments[0, :, 0])\n values = adjustments + value_biases[None, :, None]\n prediction_values = prediction_adjustments + value_biases[None, :]\n\n rolled_prediction_mask = tensor.roll(prediction_mask, -1, axis=0)\n rolled_prediction_mask = tensor.set_subtensor(\n rolled_prediction_mask[-1], 0)\n\n # Compute probabilities\n logs = self.scores(use_epsilon=False, **inputs)\n probs = tensor.exp(logs)\n if self.trpo_coef:\n logger.debug(\"Using TRPO coefficient of {}\".format(self.trpo_coef))\n old_probs = tensor.tensor3('probs')\n else:\n old_probs = tensor.zeros_like(probs)\n prediction_logs = _prediction_subtensor(logs)\n\n # Compute value targets\n value_targets = (disconnected_grad(probs) * values).sum(axis=-1)\n value_targets = tensor.roll(value_targets, -1, axis=0)\n value_targets = (self.discount * value_targets * rolled_prediction_mask\n + rewards)\n value_targets = value_targets.astype(theano.config.floatX)\n\n total_costs = 0\n\n # Compute critic cost\n if not self.compute_targets:\n logger.debug(\"Using given targets\")\n value_targets = tensor.matrix('value_targets')\n if self.solve_bellman == 'no':\n logger.debug(\"Not solving Bellman, just predicting the rewards\")\n value_targets = rewards.copy(name='value_targets')\n elif self.solve_bellman == 'without_dp':\n future_rewards = 
rewards[::-1].cumsum(axis=0)[::-1]\n logger.debug(\"Solving Bellman, but without DP\")\n value_targets = future_rewards\n elif self.solve_bellman is not True:\n raise ValueError()\n critic_errors = prediction_values - value_targets\n if self.critic_loss == 'L2':\n logger.debug(\"L2 loss for the critic\")\n critic_costs_per_char = critic_errors ** 2 * prediction_mask\n elif self.critic_loss == 'huber':\n logger.debug(\"Huber loss for the critic\")\n use_L2 = tensor.lt(abs(critic_errors), 0.5)\n critic_costs_per_char = (use_L2 * critic_errors ** 2 +\n (1 - use_L2) * abs(critic_errors)) * prediction_mask\n else:\n raise ValueError()\n critic_costs = critic_costs_per_char[self.critic_burnin_steps:].sum(axis=0)\n if not self.freeze_critic:\n total_costs += critic_costs\n\n # Compute critic Monte-Carlo cost\n critic_monte_carlo_costs = (\n (((prediction_values - future_rewards) ** 2) * prediction_mask)\n [self.critic_burnin_steps:].sum(axis=0))\n\n # Value penalty\n if self.value_penalty:\n logger.debug(\"Use value penalty\")\n if self.value_penalty_type == 'L2':\n value_deviations = (values - values.mean(axis=-1, keepdims=True)) ** 2\n elif self.value_penalty_type == 'L1':\n value_deviations = abs(values - values.mean(axis=-1, keepdims=True))\n else:\n raise ValueError(\"unknown value penalty type {}\".format(self.value_penalty_type))\n if not self.freeze_critic:\n total_costs += (\n self.value_penalty *\n (value_deviations.sum(axis=-1) * prediction_mask)\n [self.critic_burnin_steps:].sum(axis=0))\n\n # Compute actor cost\n if self.critic:\n # The actor cost will be minimized, that's why values\n # must be negated.\n est_name = self.actor_grad_estimate\n if est_name == 'all_actions':\n disadvantages = disconnected_grad(\n values.max(axis=-1)[:, :, None] - values)\n actor_costs = ((probs * disadvantages).sum(axis=-1)\n * prediction_mask)\n actor_costs = actor_costs[self.critic_burnin_steps:]\n elif est_name.startswith('1_action'):\n # Here we do not provide a target for the first step for\n # the reason we lack an estimate of the value of the initial state.\n # This is how our critic works.\n # Hopefully the network won't unlearn\n # to produce a BOS first.\n future_reward_estimate = (future_rewards\n if est_name.endswith('unbiased')\n else prediction_values)\n weights = -disconnected_grad(\n future_reward_estimate[1:] + rewards[:-1] - prediction_values[:-1])\n actor_costs = ((prediction_logs[1:] * weights) * prediction_mask[1:])\n actor_costs = actor_costs[self.critic_burnin_steps + 1:]\n else:\n raise ValueError\n actor_costs = actor_costs.sum(axis=0)\n\n actor_entropies = (probs * -logs).sum(axis=-1) * prediction_mask\n actor_entropies = actor_entropies[self.critic_burnin_steps:].sum(axis=0)\n old_actor_cross_entropies = (old_probs * -logs).sum(axis=-1) * prediction_mask\n old_actor_cross_entropies = old_actor_cross_entropies[self.critic_burnin_steps:].sum(axis=0)\n critic_policy = disconnected_grad(\n self.softmax.apply(self.critic_policy_t * values, extra_ndim=1))\n critic_cross_entropies = (\n (critic_policy * -logs).sum(axis=-1)\n * prediction_mask)\n critic_cross_entropies = critic_cross_entropies[self.critic_burnin_steps:].sum(axis=0)\n actor_costs_with_penalties = (\n actor_costs\n - self.entropy_reward_coof * actor_entropies\n # But really, should it be minus here, below?\n - self.cross_entropy_reward_coof * critic_cross_entropies\n + self.trpo_coef * old_actor_cross_entropies)\n if not self.freeze_actor:\n total_costs += actor_costs_with_penalties\n else:\n total_costs += 
disconnected_grad(actor_costs_with_penalties)\n\n # Add auxiliary variables for intermediate steps of the computation\n application_call.add_auxiliary_variable(\n rewards, name='rewards')\n application_call.add_auxiliary_variable(\n value_biases, name='value_biases')\n application_call.add_auxiliary_variable(\n values.copy(), name='values')\n application_call.add_auxiliary_variable(\n outputs.copy(), name='outputs')\n application_call.add_auxiliary_variable(\n prediction_values, name='prediction_values')\n application_call.add_auxiliary_variable(\n prediction_outputs, name='prediction_outputs')\n application_call.add_auxiliary_variable(\n value_targets.copy(), name='value_targets')\n application_call.add_auxiliary_variable(\n probs.copy(), name='probs')\n application_call.add_auxiliary_variable(\n prediction_logs, name='prediction_log_probs')\n\n # Compute some statistics for debugging\n last_character_mask = prediction_mask - rolled_prediction_mask\n last_character_costs = (critic_costs_per_char * last_character_mask).sum(axis=0)\n mean2_output = (\n ((prediction_outputs ** 2) * prediction_mask).sum()\n / prediction_mask.sum()) ** 0.5\n max_output = abs(prediction_outputs * prediction_mask).max()\n expected_reward = (probs[0] * values[0]).sum(axis=-1)\n application_call.add_auxiliary_variable(\n last_character_costs, name='last_character_costs')\n application_call.add_auxiliary_variable(\n critic_costs.mean(), name='mean_critic_cost')\n application_call.add_auxiliary_variable(\n critic_monte_carlo_costs.mean(), name='mean_critic_monte_carlo_cost')\n if self.critic:\n application_call.add_auxiliary_variable(\n actor_costs.mean(), name='mean_actor_cost')\n application_call.add_auxiliary_variable(\n actor_entropies.mean(), name='mean_actor_entropy')\n application_call.add_auxiliary_variable(\n expected_reward.mean(), name='mean_expected_reward')\n application_call.add_auxiliary_variable(\n mean2_output, name='mean2_output')\n application_call.add_auxiliary_variable(\n max_output, name='max_output')\n\n return total_costs\n","repo_name":"rizar/actor-critic-public","sub_path":"lvsr/bricks/readouts.py","file_name":"readouts.py","file_ext":"py","file_size_in_byte":23722,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"61"}
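The record above computes the critic cost as either plain L2 or a simplified Huber variant (quadratic below a 0.5 threshold, absolute above it, masked to real tokens). A minimal NumPy sketch of that piecewise rule; the function and argument names are illustrative, not taken from the source:

import numpy as np

def critic_cost_per_char(errors, mask, loss="huber", threshold=0.5):
    # errors: (time, batch) TD errors; mask: 1.0 where a real token exists.
    errors = np.asarray(errors, dtype=float)
    if loss == "L2":
        per_char = errors ** 2
    elif loss == "huber":
        use_l2 = np.abs(errors) < threshold  # mirrors tensor.lt(abs(...), 0.5)
        per_char = np.where(use_l2, errors ** 2, np.abs(errors))
    else:
        raise ValueError(loss)
    return per_char * np.asarray(mask, dtype=float)

# e.g. critic_cost_per_char([0.1, 2.0], [1, 1]) -> [0.01, 2.0]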
+{"seq_id":"23385357941","text":"f = open('tictac.in');\nout = open('tictac.out', 'w')\ncases = int(f.readline())\n\ndef getResult(board):\n\t# rows\n\tfor i in range(4):\n\t\tline = board[i]\n\t\tresult = winning_line(line)\n\t\tif result != False:\n\t\t\treturn result\n\n\t#cols\n\tfor i in range(4):\n\t\tline = [board[j][i] for j in range(4)]\n\t\tresult = winning_line(line)\n\t\tif result != False:\n\t\t\treturn result\n\n\t#diagonals\n\tline = [board[i][i] for i in range(4)]\n\tresult = winning_line(line)\n\tif result != False:\n\t\treturn result\n\n\tline2 = [board[i][3-i] for i in range(4)]\n\tresult = winning_line(line2)\n\tif result != False:\n\t\treturn result\n\ndef winning_line(line):\n\tif line.count('X') == 4 or (line.count('X') == 3 and 'T' in line):\n\t\treturn \"X won\"\n\telif line.count('O') == 4 or (line.count('O') == 3 and 'T' in line):\n\t\treturn \"O won\"\n\telse:\n\t\treturn False\n\ndef draw(board):\n\tfor i in range(4):\n\t\tfor j in range(4):\n\t\t\tif board[i][j] == '.':\n\t\t\t\treturn False\n\treturn True;\n\nfor case in range(1, cases+1):\n\tboard = [];\n\tfor _ in range(4):\n\t\tboard.append(f.readline())\n\n\tresult = getResult(board)\n\tif result is None:\n\t\tif draw(board):\n\t\t\tout.write(\"Case #\" + str(case) + \": Draw\\n\")\n\t\telse:\n\t\t\tout.write(\"Case #\" + str(case) + \": Game has not completed\\n\")\n\telse:\n\t\tout.write(\"Case #\" + str(case) + \": \" + result + \"\\n\")\n\tf.readline()\n\t# out.write('asdf')\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/876.py","file_name":"876.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9690126717","text":"import json\nimport logging\nimport util\nfrom conf import setting\nfrom flow.data.ffprobe import *\nfrom plot.media_info import MediaInfoPlot\nfrom conf import setting\n\n\nclass MediaInfoFlow:\n def __init__(self, filename, codec_type: str = None, stream_index: int = None, read_intervals: str = None):\n self.filename = filename\n self.codec_type = codec_type\n self.stream_index = stream_index\n self.read_intervals = read_intervals\n self.data = FFprobeData()\n self.data.format = FFprobeFormat(**self._get_format())\n if self.data.format.nb_streams:\n v_index = a_index = 0\n streams = self._get_streams(codec_type, stream_index)\n for stream in streams:\n is_video = bool(\"video\" in stream[\"codec_type\"])\n is_audio = bool(\"audio\" in stream[\"codec_type\"])\n if is_video:\n v_stream = FFprobeVideoStream(**stream)\n frames = self._get_frames(v_stream.codec_type, v_index)\n for frame in frames:\n v_frame = FFprobeVideoFrame(**frame)\n v_stream.frames.append(v_frame)\n packets = self._get_packets(v_stream.codec_type, v_index)\n for packet in packets:\n v_packet = FFprobeVideoPacket(**packet)\n v_stream.packets.append(v_packet)\n self.data.video.streams.append(v_stream)\n v_index += 1\n if is_audio:\n a_stream = FFprobeAudioStream(**stream)\n frames = self._get_frames(a_stream.codec_type, a_index)\n for frame in frames:\n a_frame = FFprobeAudioFrame(**frame)\n a_stream.frames.append(a_frame)\n packets = self._get_packets(a_stream.codec_type, a_index)\n for packet in packets:\n a_packet = FFprobeAudioPacket(**packet)\n a_stream.packets.append(a_packet)\n self.data.audio.streams.append(a_stream)\n a_index += 1\n\n def _get_select_stream_option(self, codec_type, stream_index):\n if not codec_type:\n return str()\n codec_flag = None\n if \"video\" in codec_type:\n codec_flag = \"v\"\n elif \"audio\" in codec_type:\n codec_flag = \"a\"\n if not codec_flag or stream_index is None:\n return str()\n return f\"-select_streams {codec_flag}:{stream_index}\"\n\n def _get_read_intervals_option(self):\n return f\"-read_intervals {self.read_intervals}\"\n\n def _get_option(self, codec_type, stream_index, extra_options):\n select_stream_opt = self._get_select_stream_option(codec_type, stream_index)\n read_intervals_opt = self._get_read_intervals_option()\n return f\"{select_stream_opt} {read_intervals_opt} {extra_options} -of json {self.filename}\"\n\n def _get_format(self):\n r = util.XPipe(f\"{setting.bin_ffprobe} -show_format -of json {self.filename}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"format\"]\n\n def _get_streams(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_streams\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return list()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"streams\"]\n\n def _get_frames(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_frames\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"frames\"]\n\n def _get_packets(self, codec_type, stream_index):\n options = self._get_option(codec_type, stream_index, \"-show_packets\")\n r = util.XPipe(f\"{setting.bin_ffprobe} {options}\").run()\n if r[\"code\"]:\n logging.info(r)\n return dict()\n rd = json.loads(s=r[\"stdout\"])\n return rd[\"packets\"]\n\n\nif __name__ == 
\"__main__\":\n flow = MediaInfoFlow(\"/opt/ffmpeg/sample/dota2/10-20.flv\", read_intervals=\"%+#30\")\n #logging.info(flow.data.dict())\n plot = MediaInfoPlot(flow.data)\n plot.show(10, 10)\n plot.save(10, 10, f\"{setting.dir_workspace}/plot.png\")\n\n","repo_name":"imssyang/python3","sub_path":"app/av-tool/source/flow/media_info.py","file_name":"media_info.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15161788467","text":"import socket,sys,os,engine,socket,time\nimport tkinter as form\nclass Connection:\n def __init__(self):\n self.Socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.Connected = False\n self.RemoteIPAddress = ''\n self.Port = 0\n @staticmethod\n def create_exists_connection(socket):\n c = Connection()\n c.Socket = socket\n (c.RemoteIPAddress,c.Port) = socket.getpeername()\n return c\n\n def connect(self,address = str, port = int):\n if not self.Connected:\n self.Socket.connect((address,port))\n self.Connected = True\n else:\n raise socket.error(\"Bağlantı zaten var.\")\n\n def disconnect(self):\n try:\n if self.Connected:\n self.Socket.disconnect()\n self.Connected = False\n except:\n raise socket.error(\"Bağlantı kapatılamadı.\")\n\n def listen(self,port = int):\n if port <= 0 :\n raise Exception(\"Port değeri yanlış.\")\n skt = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n try:\n skt.bind(('',port))\n skt.settimeout(10)\n skt.listen(1)\n \n self.Socket,host = skt.accept()\n self.Connected = True\n except socket.timeout:\n self.Connected = False\n form.messagebox.showinfo(\"Bağlantı yok\",\"Zaman aşımı : Bağlantı isteminde bulunmadı.\\n Lütfen sonra tekrar deneyiniz.\")\n except Exception as e:\n raise Exception(e.args[0])\n\n def send(self,text):\n if not self.Connected:\n raise socket.error(\"Bağlantı yok.\")\n try:\n buffer = bytes(text,\"UTF-8\")\n self.Socket.send(buffer)\n except:\n raise Exception(\"Veri gönderilirken hata ile karşılaşıldı. \")\n return True\n\n def receive(self):\n if not self.Connected:\n raise socket.error(\"Bağlantı yok.\")\n try:\n buffer = str(self.Socket.recv(16),encoding = \"UTF-8\")\n except:\n raise socket.error(\"Veri getirilirken hata oluştu.\")\n return buffer\n\n def close(self):\n if not self.Socket._closed:\n self.Socket.close()\n time.sleep(1)\n self.Connected = False\n self.Socket = None\n return True\n else:\n return False\n\nclass ConnectionForm:\n def __init__(self,master,title):\n self.Title = title\n self.Master = master\n self.Connection = False\n def render(self):\n self.root = form.Toplevel(master = self.Master)\n self.root.geometry(\"{0}x{1}+{2}+{3}\".format(250,250,int(self.root.winfo_screenwidth()/2)-125,int(self.root.winfo_screenheight()/2-125)))\n self.root.resizable(0,0)\n \n def onclosing():\n self.Master.deiconify()\n sys.exit(1)\n self.root.destroy()\n self.root.protocol(\"WM_DELETE_WINDOW\",onclosing)\n self.root.title(self.Title)\n self.__design__()\n self.root.mainloop()\n\n def __design__(self):\n\n self.IPAddress = \"localhost\"\n self.Port = \"1234\"\n self.Username = \"Oyuncu2\"\n self.root.tk_setPalette(background = \"cyan\",foreground = \"red\")\n\n hostLBL = form.Label(master = self.root,text = \"Bağlantı Noktası \",font=\"assets/font.ttf\",compound = \"left\")\n portLBL = form.Label(master = self.root,text = \"Port \",font=\"assets/font.ttf\",compound = \"right\")\n usernameLBL = form.Label(master = self.root,text = \"Kullanıcı adı \",font=\"assets/font.ttf\",compound = \"right\")\n\n hostTXT = form.Entry(master = self.root,width = 20,background = \"white\")\n portTXT = form.Entry(master = self.root,width = 20,background = \"white\")\n usernameTXT = form.Entry(master = self.root,width = 20,background = \"white\")\n\n img = form.PhotoImage(file = \"assets/enter.png\").subsample(2,2)\n join = form.Button(master = self.root,text = \"Katıl\",activebackground = \"red\",image = img,activeforeground = \"white\" ,width = 100,height = 30,justify = \"right\",compound = \"left\",bg 
= \"red\",fg = \"white\",cursor = \"hand2\")\n\n join.image = img\n connectF = form.Radiobutton(master = self.root,text = \"İstemci\",value = 1,activebackground = \"blue\",indicatoron = 0)\n listenF = form.Radiobutton(master= self.root,text = \"Sunucu\",value = 2,activebackground = \"blue\",indicatoron = 0)\n\n\n connectF.pack(anchor = \"w\",side = \"bottom\")\n listenF.pack(anchor = \"w\",side = \"bottom\")\n connectF.select()\n\n\n def _set_listen(event):\n hostTXT.config(state = \"disabled\")\n join.config(text = \"Oluştur\")\n self.conn_type =\"TO_LISTEN\"\n self.Username = \"Oyuncu1\"\n usernameTXT.delete(0,form.END)\n usernameTXT.insert(0,self.Username)\n join.bind(\"
\"\n\n@app.route(\"/words\", methods = ['POST'])\ndef index2():\n words = request.form['words']\n data = []\n\n try:\n dec = json.loads( words)\n print (\"wait data = \", dec [\"data\"] )\n obj = Words_stat(dec[\"data\"])\n\n print(\"obj = \", obj)\n data= obj.json()\n\n except:\n\n obj = Words_stat(words)\n print(\"obj = \", obj)\n\n data = obj.json()\n\n return data\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n\n\n","repo_name":"shaninandrew/words","sub_path":"MS_Words.py","file_name":"MS_Words.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8031405537","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('statisticscore', '0011_auto_20150820_1639'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='activedebate',\n name='active_debate',\n field=models.CharField(max_length=8, null=True, blank=True),\n ),\n ]\n","repo_name":"eyp-developers/statistics","sub_path":"statisticscore/migrations/0012_auto_20150822_1237.py","file_name":"0012_auto_20150822_1237.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"29256826263","text":"from __future__ import absolute_import\nimport logging\nimport pandas as pd\nimport os\nimport sys\nsys.path.append(os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + \"/../../..\"))\n\nfrom scripts.create import insert_many_rows\nfrom taipan.core import polar2cart\n\n\ndef execute(cursor, fields_file=None):\n \"\"\"Load field pointings from file to database\"\"\"\n\n logging.info(\"Loading Centroids\")\n\n if not fields_file:\n logging.info(\"No tiling file passed - aborting loading centroids\")\n return\n\n # Get centroids\n with open(fields_file, 'r') as fileobj:\n datatable = pd.read_csv(fileobj, delim_whitespace=True)\n values = [[index, row['ra'], row['dec']]\n + list(polar2cart((row['ra'], row['dec'])))\n for index, row in datatable.iterrows()]\n\n columns = [\"FIELD_ID\", \"RA\", \"DEC\", \"UX\", \"UY\", \"UZ\"]\n\n # Insert into database\n if cursor is not None:\n insert_many_rows(cursor, \"field\", values, columns=columns)\n logging.info('Loaded Centroids')\n else:\n logging.info('No DB to write to - returning values')\n return values\n\n return\n","repo_name":"Samreay/TaipanDB","sub_path":"resources/0.0.1/ingest/loadCentroids.py","file_name":"loadCentroids.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41069848278","text":"\"\"\"\nOne Away: There are three types of edits that can be performed on strings: insert a character, remove a character, or replace a character. \nGiven two strings, write a function to check if they are one edit (or zero edits) away.\n \nEXAMPLE\npale, ple -> true \npales, pale -> true \npale, bale -> true \npale, bake -> false\n\"\"\"\n\ndef oneAway(str1, str2):\n if abs(len(str1) - len(str2)) > 1:\n return False\n else:\n differences = 0\n firstStringFreq = {}\n for letter in str1:\n if letter not in firstStringFreq:\n firstStringFreq[letter] = 1\n else:\n firstStringFreq[letter] += 1\n for letter in str2:\n if letter not in firstStringFreq:\n differences += 1\n else:\n if firstStringFreq[letter] > 0:\n firstStringFreq[letter] -= 1\n else:\n differences += 1\n \n return differences <= 1\n\nprint(oneAway(\"pale\", \"pa le\"))","repo_name":"optionalg/CTCI_Python-1","sub_path":"Chapter 1 - Arrays and Strings/1.5.py","file_name":"1.5.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73740523714","text":"# Right Motor\nen_a = 4 # 7 BOARD\nin1 = 27 # 13 BOARD\nin2 = 17 # 11 BOARD\n\n# Left Motor\nin3 = 5 # 29 BOARD\nin4 = 6 # 31 BOARD\nen_b = 13 # 33 BOARD\n\nDEBUG_MODE = True\n\ntry:\n import RPi.GPIO as GPIO\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO_IMPORTED = True\n\n GPIO.setup(in1, GPIO.OUT)\n GPIO.setup(in2, GPIO.OUT)\n GPIO.setup(en_a, GPIO.OUT)\n\n GPIO.setup(in3, GPIO.OUT)\n GPIO.setup(in4, GPIO.OUT)\n GPIO.setup(en_b, GPIO.OUT)\n\n q = GPIO.PWM(en_a, 100) # 100 hertz\n p = GPIO.PWM(en_b, 100) # 100 hertz\n p.start(75) # 75% duty cycle\n q.start(75) # 75% duty cycle\n\n\nexcept ImportError:\n print(\"Error importing RPi.GPIO in motor_module\")\n GPIO_IMPORTED = False\n\n\ndef setup():\n if GPIO_IMPORTED:\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.LOW)\n GPIO.output(in4, GPIO.LOW)\n GPIO.output(in3, GPIO.LOW)\n\n if DEBUG_MODE:\n print(\"GPIO Setup complete. Current pin status:\")\n print(\"in1:\", GPIO.input(in1))\n print(\"in2:\", GPIO.input(in2))\n print(\"in3:\", GPIO.input(in3))\n print(\"in4:\", GPIO.input(in4))\n print(\"en_a:\", GPIO.input(en_a))\n print(\"en_b:\", GPIO.input(en_b))\n\n\ndef forward():\n if GPIO_IMPORTED:\n GPIO.output(in1, GPIO.HIGH)\n GPIO.output(in2, GPIO.LOW)\n\n GPIO.output(in4, GPIO.HIGH)\n GPIO.output(in3, GPIO.LOW)\n\n if DEBUG_MODE:\n print(\"Moving motors forward\")\n print(\"pin status:\")\n print(\"in1:\", GPIO.input(in1))\n print(\"in2:\", GPIO.input(in2))\n print(\"in3:\", GPIO.input(in3))\n print(\"in4:\", GPIO.input(in4))\n print(\"en_a:\", GPIO.input(en_a))\n print(\"en_b:\", GPIO.input(en_b))\n\n\ndef backward():\n if GPIO_IMPORTED:\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.HIGH)\n\n GPIO.output(in4, GPIO.LOW)\n GPIO.output(in3, GPIO.HIGH)\n\n if DEBUG_MODE:\n print(\"Moving motors backward\")\n\n\ndef right():\n if GPIO_IMPORTED:\n GPIO.output(in1, GPIO.HIGH)\n GPIO.output(in2, GPIO.LOW)\n\n GPIO.output(in4, GPIO.LOW)\n GPIO.output(in3, GPIO.LOW)\n\n if DEBUG_MODE:\n print(\"Moving motors right\")\n\n\ndef left():\n if GPIO_IMPORTED:\n GPIO.output(in4, GPIO.HIGH)\n GPIO.output(in3, GPIO.LOW)\n\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.LOW)\n\n if DEBUG_MODE:\n print(\"Moving motors left\")\n\n\ndef stop():\n if GPIO_IMPORTED:\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.LOW)\n\n GPIO.output(in4, GPIO.LOW)\n GPIO.output(in3, GPIO.LOW)\n\n if DEBUG_MODE:\n print(\"Stopping motors\")\n\n\ndef set_speed(speed):\n if GPIO_IMPORTED:\n # Ensure the speed is within the valid range (0 to 100)\n speed = max(0, min(100, speed))\n\n # Change the duty cycle of the PWM signals to adjust the speed\n p.ChangeDutyCycle(speed)\n q.ChangeDutyCycle(speed)\n\n if DEBUG_MODE:\n print(f\"Setting motors speed to {speed}%\")\n\n\n# GPIO.cleanup()\n","repo_name":"Jose-AE/TC1004B","sub_path":"M3-Project/Car/motor_module.py","file_name":"motor_module.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33170722464","text":"from turtle import Turtle\n\nSTARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]\nMOVE_DIST = 20\nUP = 90\nLEFT = 180\nDOWN = 270\nRIGHT = 0\n\n\nclass Snake:\n def __init__(self):\n self.segments = []\n for pos in STARTING_POSITIONS:\n self.add_segment(pos)\n self.head = self.segments[0]\n self.head.color('cyan')\n\n def move(self):\n for seg_num in range(len(self.segments) - 1, 0, -1):\n new_x = self.segments[seg_num - 1].xcor()\n new_y = self.segments[seg_num - 1].ycor()\n self.segments[seg_num].goto(new_x, new_y)\n self.head.fd(MOVE_DIST)\n\n def extend(self):\n self.add_segment(self.segments[-1].position())\n\n def add_segment(self, pos):\n t = Turtle(shape=\"square\")\n t.color(\"white\")\n t.pu()\n t.goto(pos)\n self.segments.append(t)\n\n def reset(self):\n for seg in self.segments:\n seg.goto(1000, 1000)\n self.segments.clear()\n for pos in STARTING_POSITIONS:\n self.add_segment(pos)\n self.head = self.segments[0]\n self.head.color('cyan')\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.setheading(UP)\n\n def down(self):\n if self.head.heading() != UP:\n self.head.setheading(DOWN)\n\n def right(self):\n if self.head.heading() != LEFT:\n self.head.setheading(RIGHT)\n\n def left(self):\n if self.head.heading() != RIGHT:\n self.head.setheading(LEFT)\n","repo_name":"Randy1812/PythonProjects","sub_path":"Day 20 & 21 - SnakeGame/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13753035538","text":"from drl.algo.AgentConfig import AgentConfig\n\nclass DQNConfig(AgentConfig):\n def __init__(self):\n super().__init__()\n self.replay_fn = None\n self.target_network_update_freq = None\n self.exploration_steps = None\n self.history_length = None\n self.batch_size = None\n self.double_q = False\n self.tag = 'dqn'\n self.random_action_prob = None\n","repo_name":"wumo/drl","sub_path":"drl/algo/critic/dqn/DQNConfig.py","file_name":"DQNConfig.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23450447871","text":"def min_friends(audience): \n cumulative_people = 0\n num_friends = 0\n for i, num_people in enumerate(audience): \n if num_people > 0: \n if i > cumulative_people + num_friends: \n num_friends += i-cumulative_people\n cumulative_people += num_people\n return num_friends\n \nif __name__ == '__main__': \n num_tests = int(raw_input())\n for i in range(num_tests): \n _, audience_str = raw_input().strip().split()\n audience = []\n for num_people in audience_str: \n audience.append(int(num_people))\n print('Case #%d: %d' % (i+1, min_friends(audience)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1902.py","file_name":"1902.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8434897342","text":"import socket\nimport websocket\nimport _thread\nfrom datetime import datetime, time\nimport time\nimport json, random\nimport queue\nimport threading\nfrom threading import Thread\nimport traceback\n\n\n# ++++++++++++++++++++++++++++++++++++++\n# #\n# SMART FOOD KEEPER-FINAL PROJECT BY: #\n# SAMUEL P. MORONTA | YEHUDY DE PEÑA #\n# #\n# ++++++++++++++++++++++++++++++++++++++#\n\n\nHEADER = 64\nPORT = 6000\nSERVER = \"127.0.0.1\"\nADDR = (SERVER, PORT)\nFORMAT = 'utf-8'\nDISCONNECT_MESSAGE = \"!DISCONNECT\"\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\n\ndef on_message(ws, message):\n print(message)\n\n\ndef on_error(ws, error):\n print(error)\n\n\ndef on_close(ws, close_status_code, close_msg):\n print(\"### closed ###\")\n\n\ndef generate_container_data():\n \"\"\"\n This functions is to receive data from load cell\n and adjust the values to send it to the server\n in java with the endpoint [server/container]\n \"\"\"\n weight_data_in = round(random.uniform(0.00, 5.00), 2)\n container_data = {\n 'container':1,\n 'weight': weight_data_in\n }\n json_sensor_data = json.dumps(container_data, indent=4, default=str)\n\n return json_sensor_data\n\ndef send_sensor_data(ws):\n def run(*args):\n i = 0\n random.seed(datetime.now())\n temperature = round(random.uniform(30.00, 31.00), 2)\n humidity = round(random.uniform(60.00, 61.00), 2)\n server.listen()\n conn, addr = server.accept()\n print(f\"[NEW CONNECTION] {addr} connected.\")\n\n try:\n while True:\n print(\"[GETTING] Getting info from ML client\")\n\n msg_length = conn.recv(HEADER).decode(FORMAT)\n if msg_length:\n msg_length = int(msg_length)\n # Recibo todas las informaciones del reconocimiento\n msg = conn.recv(msg_length).decode(FORMAT)\n json_object = json.loads(msg)\n fruit_cant = json_object[\"fruitCant\"]\n fruit_type = json_object[\"fruitType\"]\n cant_overripe = json_object[\"cantOverripe\"]\n cant_ripe = json_object[\"cantRipe\"]\n cant_unripe = json_object[\"cantUnripe\"]\n deviceId = \"1\"\n\n reg_data = {\n \"deviceId\": deviceId,\n \"temperature\": temperature,\n \"humidity\": humidity,\n \"fruitCant\": fruit_cant,\n \"fruitType\": fruit_type,\n \"cantOverripe\": cant_overripe,\n \"cantRipe\": cant_ripe,\n \"cantUnripe\": cant_unripe\n }\n json_reg_data = json.dumps(reg_data, indent=4, default=str)\n ws.send(json_reg_data)\n\n conn.close()\n ws.close()\n print(\"thread terminating...\")\n\n except Exception:\n print(traceback.format_exc())\n\n _thread.start_new_thread(run, ())\n\n\ndef simulate_realtime_ml_data_1(ws):\n def run(*args):\n while True:\n temperature = round(random.uniform(25.00, 38.00), 2)\n humidity = round(random.uniform(50.00, 60.00), 2)\n overripe = 3\n ripe = 1\n unripe = 1\n fruitCant = 4\n\n random_reg_data = {\n \"deviceId\": \"2\",\n \"temperature\": temperature,\n \"humidity\": humidity,\n \"fruitCant\": fruitCant,\n \"fruitType\": \"papaya\",\n \"cantOverripe\": overripe,\n \"cantRipe\": ripe,\n \"cantUnripe\": unripe\n }\n json_reg_data = json.dumps(random_reg_data, indent=4, default=str)\n time.sleep(20)\n ws.send(json_reg_data)\n ws.close()\n _thread.start_new_thread(run, ())\n\n\ndef simulate_realtime_ml_data_2(ws):\n def run(*args):\n while True:\n temperature = round(random.uniform(25.00, 38.00), 2)\n humidity = round(random.uniform(50.00, 60.00), 2)\n overripe = 0\n ripe = 2\n unripe = 1\n fruitCant = 3\n\n random_reg_data = {\n \"deviceId\": \"3\",\n \"temperature\": temperature,\n \"humidity\": humidity,\n \"fruitCant\": 
fruitCant,\n \"fruitType\": \"pineapple\",\n \"cantOverripe\": overripe,\n \"cantRipe\": ripe,\n \"cantUnripe\": unripe\n }\n json_reg_data = json.dumps(random_reg_data, indent=4, default=str)\n time.sleep(10)\n ws.send(json_reg_data)\n ws.close()\n _thread.start_new_thread(run, ())\n\n\ndef simulate_realtime_ml_data_3(ws):\n def run(*args):\n while True:\n temperature = round(random.uniform(30.00, 40.00), 2)\n humidity = round(random.uniform(55.00, 70.00), 2)\n overripe = 1\n ripe = 2\n unripe = 2\n fruitCant = 4\n\n random_reg_data = {\n \"deviceId\": \"4\",\n \"temperature\": temperature,\n \"humidity\": humidity,\n \"fruitCant\": fruitCant,\n \"fruitType\": \"papaya\",\n \"cantOverripe\": overripe,\n \"cantRipe\": ripe,\n \"cantUnripe\": unripe\n }\n json_reg_data = json.dumps(random_reg_data, indent=4, default=str)\n time.sleep(10)\n ws.send(json_reg_data)\n ws.close()\n _thread.start_new_thread(run, ())\n\n\ndef send_container_data(ws):\n def run(*args):\n i = 0\n while True:\n print(\"DIGITE UN PESO: \")\n x = input();\n container_data = {\n 'containerId': \"1\",\n 'weight': x\n }\n json_sensor_data = json.dumps(container_data, indent=4, default=str)\n ws.send(json_sensor_data)\n \"\"\"\n while True:\n print(\"[*] Sending data # [{}] to container \".format(i))\n\n weight_data_in = round(random.uniform(0, 5), 1)\n container_data = {\n 'containerId': \"1\",\n 'weight': weight_data_in\n }\n json_sensor_data = json.dumps(container_data, indent=4, default=str)\n time.sleep(10)\n ws.send(json_sensor_data)\n print(\"+++++++++++++++++++++++++++++++++++\")\n i = i + 1\n # time.sleep(1)\n ws.close()\n \"\"\"\n print(\"thread terminating...\")\n\n _thread.start_new_thread(run, ())\n\n\ndef connect_websocket_shelf():\n \"\"\"\n Function to connect websocket shelf endponit /server/shelf\n \"\"\"\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(\"ws://localhost:7000/server/shelf\", on_open=send_sensor_data, on_message=on_message,\n on_error=on_error, on_close=on_close)\n ws.run_forever()\n\n\ndef connect_websocket_shelf_simulate_1():\n \"\"\"\n Function to connect websocket shelf endponit /server/shelf\n \"\"\"\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(\"ws://localhost:7000/server/shelf\", on_open=simulate_realtime_ml_data_1, on_message=on_message,\n on_error=on_error, on_close=on_close)\n ws.run_forever()\n\ndef connect_websocket_container():\n \"\"\"\n Function to connect websocket container endponit /server/container\n \"\"\"\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(\"ws://localhost:7000/server/container\", on_open=send_container_data,\n on_message=on_message, on_error=on_error, on_close=on_close)\n ws.run_forever()\n\n\nif __name__ == \"__main__\":\n \"Threading to keep twice function running\"\n #Thread(target=connect_websocket_container).start()\n Thread(target=connect_websocket_shelf).start()\n #Thread(target=connect_websocket_shelf_simulate_1).start()","repo_name":"Samuel-P-Moronta/Ripeness_Fruit_Detection_Classification-SFK","sub_path":"core_websocket_handler.py","file_name":"core_websocket_handler.py","file_ext":"py","file_size_in_byte":8042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
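The server thread above expects a fixed 64-byte ASCII header carrying the payload length before each message (HEADER = 64, then int() on the decoded header); a hedged sketch of the matching client-side framing, which the ML client in this project would need to implement:

def send_framed(sock, text, header=64, encoding="utf-8"):
    # Fixed-width length header first, then the payload. int() on the
    # receiving side tolerates the padding spaces.
    payload = text.encode(encoding)
    length_field = str(len(payload)).encode(encoding)
    length_field += b" " * (header - len(length_field))
    sock.sendall(length_field)
    sock.sendall(payload)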
+{"seq_id":"17750367469","text":"daicoding='utf-8'\nimport os\nfrom torch.utils.data import DataLoader\nfrom lib.dataset import Data\nimport torch.nn.functional as F\nimport torch\nimport cv2\nimport time\nfrom net import Mynet\nimport numpy as np\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nif __name__ == '__main__':\n model_path= 'model/unalign.pth'\n out_path = './VT821'\n data = Data(root='./data/VT821_unalign/',mode='test')\n loader = DataLoader(data, batch_size=1,shuffle=False)\n net = Mynet().cuda()\n print('loading model from %s...' % model_path)\n net.load_state_dict(torch.load(model_path))\n if not os.path.exists(out_path): os.mkdir(out_path)\n time_s = time.time()\n img_num = len(loader)\n net.eval()\n with torch.no_grad():\n for rgb, t, _, (H, W), name in loader:\n print(name[0])\n scores = net(rgb.cuda().float(), t.cuda().float())\n score = F.interpolate(scores[-1], size=(H, W), mode='bilinear', align_corners=True)\n pred = np.squeeze(score.cpu().data.numpy())\n cv2.imwrite(os.path.join(out_path, name[0][:-4] + '.png'), 255 * pred)\n time_e = time.time()\n print('speed: %f FPS' % (img_num / (time_e - time_s)))\n\n\n\n","repo_name":"lz118/Deep-Correlation-Network","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"10531074587","text":"from turtle import Turtle\r\n\r\nclass ScoreBoard(Turtle):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.right_player_score = 0\r\n self.left_player_score = 0\r\n self.color(\"white\")\r\n self.penup()\r\n self.hideturtle()\r\n self.update_score()\r\n\r\n def update_score(self):\r\n self.clear()\r\n self.goto(100, 230)\r\n self.write(arg=f\"{self.right_player_score}\", move=False, align=\"right\", font=('Courier', 48, 'bold'))\r\n self.goto(-100, 230)\r\n self.write(arg=f\"{self.left_player_score}\", move=False, align=\"left\", font=('Courier', 48, 'bold'))\r\n\r\n\r\n # R_player_score\r\n def r_score(self):\r\n self.right_player_score += 1\r\n self.update_score()\r\n\r\n # self.goto(100, 230)\r\n # # self.color(\"white\")\r\n # # self.penup()\r\n # self.write(arg=f\"{self.right_player_score}\", move=False, align=\"right\", font=('Courier', 48, 'bold'))\r\n # # self.hideturtle()\r\n\r\n def l_score(self):\r\n self.left_player_score += 1\r\n self.update_score()\r\n\r\n # self.goto(-100, 230)\r\n # # self.color(\"white\")\r\n # # self.penup()\r\n # self.write(arg=f\"{self.left_player_score}\", move=False, align=\"left\", font=('Courier', 48, 'bold'))\r\n # # self.hideturtle()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"harshalgajera043/Python","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41787621045","text":"# -*- coding: utf-8 -*-\nimport re\nimport json\nfrom collections import namedtuple\nfrom scrapy import FormRequest, Field, Item, Request\nfrom product_ranking.spiders import BaseProductsSpider\n\n\nclass InstagramUsersItem(Item):\n username = Field()\n followers = Field()\n total_posts = Field()\n posts = Field()\n\n # Search metadata.\n site = Field() # String.\n search_term = Field() # String.\n ranking = Field() # Integer.\n total_matches = Field() # Integer.\n results_per_page = Field() # Integer.\n scraped_results_per_page = Field() # Integer.\n search_term_in_title_exactly = Field()\n search_term_in_title_partial = Field()\n search_term_in_title_interleaved = Field()\n _statistics = Field()\n\n\nclass InstagramCrawlerSpider(BaseProductsSpider):\n name = \"instagram_users_products\"\n allowed_domains = [\"instagram.com\"]\n posts_url = 'https://www.instagram.com/query/'\n\n def __init__(self, *args, **kwargs):\n super(InstagramCrawlerSpider, self).__init__(\n site_name=\"instagram.com\",\n *args, **kwargs)\n self.product_url = kwargs['product_url']\n\n self.comments = []\n self.likes = []\n self.num_pages = 1\n\n @staticmethod\n def valid_url(url):\n if not re.findall(r\"http(s){0,1}\\:\\/\\/\", url):\n url = \"http://\" + url\n return url\n\n def start_requests(self):\n yield Request(url=self.valid_url(self.product_url),\n meta={'remaining': 99999,\n 'search_term': ''},\n callback=self._parse_single_product)\n\n def _parse_single_product(self, response):\n # extracting json data from