diff --git "a/2752.jsonl" "b/2752.jsonl" new file mode 100644--- /dev/null +++ "b/2752.jsonl" @@ -0,0 +1,296 @@ +{"seq_id":"9437018169","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.core.files.base import ContentFile\nfrom .forms import canvasForm\nfrom .models import Canvas\nimport base64\n# Create your views here.\nfrom django.core.files.base import ContentFile\n# Create your views here.\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f\"New Account Created : {username}\")\n login(request, user)\n messages.info(request, f\"You are Now logged in : {username}\")\n return redirect(\"/home\")\n else:\n for msg in form.error_messages:\n messages.error(request, f\"{msg}:{form.error_messages[msg]}\")\n\n form = UserCreationForm()\n return render(request,'register.html',{'form':form})\n\ndef logout_req(request):\n logout(request)\n messages.info(request, \"Logout successful\")\n return redirect(\"/\")\n\ndef login_req(request):\n if request.method == 'POST':\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"You are logged in : {username}\")\n return redirect(\"/home\")\n else:\n messages.error(request, \"Invalid username and password\")\n form = AuthenticationForm()\n return render(request, \"login.html\", {'form':form})\n\ndef updesign(request):\n form = canvasForm()\n if request.method == 'POST':\n form = canvasForm(request.POST, request.FILES)\n if form.is_valid():\n image_b64 = request.POST.get('design_image')\n format, imgstr = image_b64.split(';base64,')\n ext = format.split('/')[-1]\n data = ContentFile(base64.b64decode(imgstr), name='temp.' 
+ ext)\n customer = request.POST['customer']\n if request.user.is_authenticated:\n customer = request.user\n form, created = Canvas.objects.get_or_create(customer=str(customer), design_image=data)\n form.save() \n else:\n form = canvasForm()\n context = {'form':form}\n return render(request, 'homepage.html', context)","repo_name":"RohitChauhan916/django-canvas","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"33433442929","text":"def arithmetic_arranger(questions, show_answers = False):\n \n # get elements\n n1s = []\n n2s = []\n ops = []\n \n for q in questions:\n n1s.append(q.split()[0])\n n2s.append(q.split()[2])\n ops.append(q.split()[1])\n \n # CHECK ERROR\n # too many problems?\n if len(questions) > 5:\n return 'Error: Too many problems.'\n\n \n # + - only?\n for op in ops:\n if op == '+' or op == '-':\n continue\n else:\n return \"Error: Operator must be '+' or '-'.\"\n \n \n # digit only?\n for n1 in n1s:\n if n1.isdigit():\n continue\n else:\n return 'Error: Numbers must only contain digits.'\n \n for n2 in n2s:\n if n2.isdigit():\n continue\n else:\n return 'Error: Numbers must only contain digits.'\n \n # max 4 digit?\n for n1 in n1s:\n if len(n1) < 5:\n continue\n else:\n return 'Error: Numbers cannot be more than four digits.' \n \n for n2 in n2s:\n if len(n2) < 5:\n continue\n else:\n return 'Error: Numbers cannot be more than four digits.' \n \n \n #\n total_len = [max(len(n1),len(n2)) + 2 for n1, n2 in zip(n1s, n2s)]\n \n \n # answers\n answers = []\n for n1,n2,op in zip(n1s,n2s,ops):\n temp = int(n1) + int(n2) if op == '+' else int(n1) - int(n2)\n answers.append(str(temp))\n \n \n # create line_1\n line_1 = ''\n for n1,total in zip(n1s,total_len):\n line_1 = line_1 + n1.rjust(total) + ' '\n \n \n # create line_2 \n line_2 = ''\n for n2,op,total in zip(n2s,ops,total_len):\n line_2 = line_2 + op + ' ' + n2.rjust(total-2) + ' '\n \n \n # create line_3\n line_3 = ''\n for total in total_len:\n line_3 = line_3 + '-' * total + ' '\n \n \n # create line_4, ans\n line_4 = ''\n for ans, total in zip(answers,total_len):\n line_4 = line_4 + ans.rjust(total) + ' '\n \n\n # output\n if show_answers == True:\n return line_1.rstrip() + '\\n' + line_2.rstrip() + '\\n' + line_3.rstrip() + '\\n' + line_4.rstrip()\n else:\n return line_1.rstrip() + '\\n' + line_2.rstrip() + '\\n' + line_3.rstrip()\n \n","repo_name":"charliesong66/freecodecamp-Arithmetic-Formatter","sub_path":"arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"20019556097","text":"import math\n\n\ndef polysum(n, s):\n \"\"\"\n Computes the sum of area and square of the\n perimeter of the regular polygon, rounded\n to 4 decimal places\n\n :param n: number of sides\n :param s: length of each side\n :return: float\n \"\"\"\n\n area = (0.25 * n * s**2) / math.tan(math.pi/n)\n square_perimeter = (s * n)**2\n\n result_sum = area + square_perimeter\n\n return round(result_sum, 4)\n","repo_name":"vulcanoio/mit-cs","sub_path":"lectures/polysum.py","file_name":"polysum.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9005472148","text":"from battle import Battle\nfrom classes import Quest, TreasureChest\nfrom creatureclass 
import Hero, Enemy\nfrom inventory import *\n\n\nplayer = Hero(\n name=\"Thorin Oakenshield\",\n level=1,\n experience=0,\n race=\"Dwarf\",\n max_health=150,\n health=100,\n strength=15,\n dexterity=8,\n defense=5,\n statuses=\"Sick\",\n inventory=Inventory([]),\n is_alive=True,\n spellbook=None,\n max_mana=20,\n mana=10,\n)\n\nenemy = Enemy(\n name=\"Azog The Defiler\",\n level=1,\n experience=0,\n race=\"Orc\",\n max_health=70,\n health=70,\n strength=10,\n dexterity=5,\n defense=10,\n statuses=\"Stunned\",\n inventory=Inventory(),\n is_alive=True,\n spellbook=None,\n weakness=\"Cold\",\n resistance=\"Fire\",\n)\nexcalibur = Weapon(\n name=\"Excalibur\",\n description=\"Long Sword, good for Goblins\",\n value=50,\n slot_size=1,\n required_strength=15,\n required_dexterity=15,\n allowed_race=\"Human\",\n max_durability=10,\n durability=2,\n inventory=Inventory,\n damage_type=\"Fire\",\n damage=10,\n is_equipped=False,\n)\nleather_armor = Armor(\n name=\"Leather Jacket\",\n description=\"Good for Humans\",\n value=30,\n slot_size=1,\n required_strength=5,\n required_dexterity=0,\n allowed_race=\"Human\",\n max_durability=15,\n durability=10,\n inventory=Inventory,\n resistance=\"Cold\",\n protection=5,\n is_equipped=False,\n)\nquest = Quest(\n name=\"Find Blueberries.\",\n description=\"Find 7 Blueberries in the Forest.\",\n reward={\"Blueberry\": 7},\n completed=False,\n)\n\nlife_potion = Consumable(\n name=\"Life Potion\",\n description=\"Restores 20 health.\",\n value=20,\n slot_size=1,\n required_strength=0,\n required_dexterity=0,\n allowed_race=\"Human\",\n max_durability=1,\n durability=1,\n inventory=Inventory,\n heal=20,\n mana=0,\n strength=0,\n dexterity=0,\n)\nboost_potion = Consumable(\n name=\"Boost Potion\",\n description=\"Restores 10 health and 10 mana.\",\n value=20,\n slot_size=1,\n required_strength=0,\n required_dexterity=0,\n allowed_race=\"Human\",\n max_durability=1,\n durability=1,\n inventory=Inventory,\n heal=90,\n mana=10,\n strength=0,\n dexterity=0,\n)\nstrength_potion = Consumable(\n name=\"Strenght Potion\",\n description=\"Increases Strength by 20\",\n value=20,\n slot_size=1,\n required_strength=0,\n required_dexterity=0,\n allowed_race=\"Human\",\n max_durability=1,\n durability=1,\n inventory=Inventory,\n heal=0,\n mana=0,\n strength=15,\n dexterity=0,\n)\n\n\n# test for Hero class\ndef test_hero_creation():\n assert player.name == \"Thorin Oakenshield\"\n assert player.level == 1\n assert player.experience == 0\n assert player.race == \"Dwarf\"\n assert player.max_health == 150\n assert player.health == 100\n assert player.strength == 15\n assert player.dexterity == 8\n assert player.defense == 5\n assert player.statuses == \"Sick\"\n assert player.inventory.items == []\n assert player.max_mana == 20\n assert player.mana == 10\n\n\n# test for Enemy class\ndef test_enemy_creation():\n assert enemy.name == \"Azog The Defiler\"\n assert enemy.level == 1\n assert enemy.experience == 0\n assert enemy.race == \"Orc\"\n assert enemy.max_health == 70\n assert enemy.health == 70\n assert enemy.strength == 10\n assert enemy.dexterity == 5\n assert enemy.defense == 10\n assert enemy.statuses == \"Stunned\"\n assert enemy.inventory.items == []\n assert enemy.weakness == \"Cold\"\n assert enemy.resistance == \"Fire\"\n\n\n# test weapon creation\ndef test_weapon_creation():\n assert excalibur.name == \"Excalibur\"\n assert excalibur.description == \"Long Sword, good for Goblins\"\n assert excalibur.value == 50\n assert excalibur.slot_size == 1\n assert 
excalibur.required_strength == 15\n assert excalibur.required_dexterity == 15\n assert excalibur.allowed_race == \"Human\"\n assert excalibur.damage_type == \"Fire\"\n assert excalibur.damage == 10\n\n\n# test armor creation\ndef test_armor_creation():\n armor = leather_armor\n assert armor.name == \"Leather Jacket\"\n assert armor.description == \"Good for Humans\"\n assert armor.value == 30\n assert armor.slot_size == 1\n assert armor.required_strength == 5\n assert armor.required_dexterity == 0\n assert armor.allowed_race == \"Human\"\n assert armor.resistance == \"Cold\"\n assert armor.protection == 5\n assert armor.is_equipped == False\n\n\n# test create and complete quest\ndef test_quest_creation():\n assert quest.name == \"Find Blueberries.\"\n assert quest.description == \"Find 7 Blueberries in the Forest.\"\n assert quest.reward == {\"Blueberry\": 7}\n assert quest.completed == False\n quest.complete_quest()\n assert quest.completed == True\n\n\n# test weapon equip\ndef test_equip_weapon():\n assert player.strength == 15\n player.equip_weapon(excalibur)\n assert excalibur.is_equipped == True\n player.unequip_weapon(excalibur)\n assert excalibur.is_equipped == False\n assert player.strength == 15\n\n\n# test armor equip\ndef test_equip_armor():\n assert player.defense == 5\n player.equip_armor(leather_armor)\n assert leather_armor.is_equipped == True\n player.unequip_armor(leather_armor)\n assert leather_armor.is_equipped == False\n assert player.defense == 5\n\n\n# test add to inventory, degrade, destroy, remove from inventory\ndef test_inventory_add_degrade_destroy():\n inventory = Inventory(slots=20)\n inventory.add_item(excalibur) # takes up 1 slot\n inventory.add_item(leather_armor) # takes up 1 slot\n inventory.upgrade(5) # add 5 slots\n print(inventory.items)\n assert inventory.items == [excalibur, leather_armor]\n assert inventory.slots == 23\n assert excalibur in inventory.items\n assert leather_armor in inventory.items\n assert \"Hammer\" not in inventory.items\n excalibur.degrade(player)\n assert excalibur.durability == 1\n excalibur.destroy()\n assert inventory.items == [leather_armor]\n\n\n# test taking damage by player and enemy\ndef test_take_damage():\n player.take_damage(enemy.strength)\n assert player.health == 90\n assert player.is_alive == True\n player.take_damage(enemy.strength)\n assert player.is_alive == True\n player.take_damage(enemy.strength + 80)\n assert player.is_alive == False\n\n\n# test for battle system and die if health =< 0\ndef test_battle():\n player.health = 100\n enemy.health = 70\n battle = Battle(player, enemy)\n assert battle.turn == 0\n player.attack(enemy)\n assert enemy.health == 65\n enemy.attack(player)\n assert player.health == 95\n player.equip_weapon(excalibur)\n player.attack(enemy)\n\n\n# test consumables\ndef test_consumables():\n inventory = Inventory([], 5, 20)\n inventory.add_item(life_potion)\n inventory.add_item(boost_potion)\n inventory.add_item(strength_potion)\n assert player.health == 95\n assert player.mana == 10\n assert player.strength == 25\n assert life_potion in inventory.items\n player.use_consumable(life_potion)\n assert player.health == 115\n assert inventory.items == [boost_potion, strength_potion]\n player.use_consumable(boost_potion)\n assert player.health == 150\n assert player.mana == 20\n assert inventory.items == [strength_potion]\n player.use_consumable(strength_potion)\n assert player.strength == 40\n assert inventory.items == []\n\n\n# test treasure chest\ndef test_treasure_chest():\n chest = 
TreasureChest(items=[excalibur])\n assert chest.items == [excalibur]\n","repo_name":"Skaz7/heros_battle","sub_path":"test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"73820404542","text":"import sys\nimport os\nfrom typing import Tuple, List, Optional\nfrom conllu import TokenList\nfrom conllu.serializer import serialize_field\nimport cassis\n#from io import open\nfrom conllu import parse_incr\nfrom tqdm import tqdm\n\n\nsys.path.append(\"/home/stud_homes/s5935481/uima_cassis/src\")\nsys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))\n\n\nROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))\n\n\ndef read_in_tiger_corpus(rm_multirootsents: bool = True) -> List[TokenList]:\n \"\"\"\n Function for reading tiger-corpus-native conllu file\n and return it as correctly formatted TokenLists.\n :return:\n \"\"\"\n path = os.path.join(ROOT_DIR,\n \"data/conllu_files/tiger_release_aug07.corrected.16012013.conll09\")\n with open(path, \"r\", encoding=\"utf-8\") as f:\n # ==== List to store all TokenLists (one for each sentence) ====\n sents_in_conllu = []\n # --> setting var for counting sents and text:\n sent_id = -1\n text = \"\"\n root_count = 0\n multi_root_sent_count = 0\n # --> Creating TokenList for one conllu-Sentence:\n compiled_toks = TokenList()\n for line in tqdm(f):\n if line == \"\\n\":\n if len(compiled_toks) != 0:\n sent_id += 1\n # --> Filling in Metadata:\n compiled_toks.metadata = {\"sent_id\": str(sent_id), \"text\": text.rstrip()}\n if rm_multirootsents and root_count > 1:\n multi_root_sent_count += 1\n else:\n sents_in_conllu.append(compiled_toks)\n # --> Clearing:\n compiled_toks = TokenList()\n text = \"\"\n root_count = 0\n else:\n # --> Filling in Token Information for each token in sent:\n line = line.split(\"\\t\")\n\n if line[4][0] == \"$\":\n deprel_label = \"PUNCT\"\n else:\n deprel_label = line[10]\n\n if line[8] == \"0\":\n root_count += 1\n\n compiled_toks.append({'id': line[0].split(\"_\")[-1],\n 'form': line[1],\n 'lemma': line[2],\n 'upostag': \"_\",\n 'xpostag': line[4],\n 'feats': line[6],\n 'head': line[8],\n 'deprel': deprel_label,\n 'deps': '_',\n 'misc': '_'})\n text += f\"{line[1]} \"\n\n\n print(f\"n-Sents with multi-root: {multi_root_sent_count}\")\n\n return sents_in_conllu\n\n\ndef export_tiger_corpus(rm_multirootsents: bool = True):\n \"\"\"\n Write Tiger-Corpus train/dev/test set to conllu-files.\n :return:\n \"\"\"\n # ==== Getting all TokenLists (1/sent) ====\n sents_in_conllu = read_in_tiger_corpus(rm_multirootsents=rm_multirootsents)\n sent_count = len(sents_in_conllu)\n print(f\"{sent_count} Sentences are in the Tiger-Corpus.\")\n\n # ==== Saving as train, test, dev set ====\n train = sents_in_conllu[:int(sent_count*0.7)]\n dev = sents_in_conllu[int(sent_count*0.7):int(sent_count*0.85)]\n test = sents_in_conllu[int(sent_count*0.85):]\n\n if rm_multirootsents:\n root = \"singleroot\"\n else:\n root = \"multiroot\"\n\n # --> train:\n with open(os.path.join(ROOT_DIR, f\"data/conllu_files/tiger_train_{root}.conllu\"), \"w\") as f:\n for compiled_sentence in train:\n f.write(compiled_sentence.serialize())\n # --> test:\n with open(os.path.join(ROOT_DIR, f\"data/conllu_files/tiger_test_{root}.conllu\"), \"w\") as f:\n for compiled_sentence in dev:\n f.write(compiled_sentence.serialize())\n # --> dev:\n with open(os.path.join(ROOT_DIR, 
f\"data/conllu_files/tiger_dev_{root}.conllu\"), \"w\") as f:\n for compiled_sentence in test:\n f.write(compiled_sentence.serialize())\n\n\nexport_tiger_corpus()","repo_name":"LeonHammerla/train_parser","sub_path":"utility/tiger_corpus_utility.py","file_name":"tiger_corpus_utility.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"30024489139","text":"#https://codeforces.com/problemset/problem/1646/B\n#Lang\t\t:\tPython 3.8\n#Time\t\t:\t187 ms\n#Memory :\t17900 KB\nfor _ in range(int(input())):\n n=int(input())\n a=list(map(int,input().split()))[:n]\n a.sort()\n r1,l2=(n-1)//2,(n+2)//2\n b,r=sum(a[:r1+1]),sum(a[l2:])\n print(\"YES\" if r>b else \"NO\")\n","repo_name":"lilberick/Competitive-programming","sub_path":"online-judge-solutions/Codeforces/1646B.py","file_name":"1646B.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"24"} +{"seq_id":"15782625864","text":"\"\"\"\nCase 5.\nDevelopers: Kabaev A., Anufrienko K.\n\"\"\"\n\nfrom class_declaration import Hotel, Client, Room\n\n\ndef main():\n hotel = Hotel()\n with open('fund.txt') as f:\n rooms_list = list(map(lambda rooms_data: rooms_data.rstrip().split(), f.readlines()))\n rooms = [Room(*room_info, hotel=hotel) for room_info in rooms_list]\n clients_amount = []\n money_earn = []\n money_lose = []\n type_list = []\n with open('booking.txt') as f:\n clients_list = list(map(lambda client_data: client_data.rstrip().split(), f.readlines()))\n clients = [Client(*client_info, hotel=hotel) for client_info in clients_list]\n for hotel_client in clients:\n reg_params = hotel_client.registration()\n print('Поступила заявка на бронирование:\\n')\n print('{} {} {} {} {} {} {} {}\\n'.format(hotel_client.register_date, hotel_client.surname,\n hotel_client.name, hotel_client.middle_name,\n hotel_client.people, hotel_client.from_date,\n hotel_client.days, hotel_client.money))\n reg_options = reg_params['best_choice_options']\n if reg_params['best_choice'] is None:\n print('Предложений по данному запросу не найдено. В бронировании отказано.\\n')\n else:\n print('Найден:\\n')\n outinfo = [\n reg_params['best_choice'].number,\n reg_options['type_room'],\n reg_options['comfort'],\n reg_params['best_choice'].capacity,\n hotel_client.people,\n f' {reg_options[\"food\"]}',\n reg_options['price']\n ]\n print(reg_options['type_room'])\n print('номер {} {} {} рассчитан на {} чел. фактически {} чел.{} стоимость {} руб./сутки'.format(*outinfo))\n if reg_params['answer'] is True:\n print('Клиент согласен. 
Номер забронирован.\\n')\n clients_amount.append(hotel_client)\n type_list.append(reg_options['type_room'])\n money_earn.append(reg_options['price'] * int(hotel_client.days))\n else:\n print('Клиент отказался от варианта'),\n money_lose.append(reg_options['price'] * int(hotel_client.days))\n hotel.bron = len(clients_amount)\n hotel.profit = sum(money_earn)\n hotel.loss = sum(money_lose)\n hotel.percent = str(round((len(rooms) - len(clients_amount)) * 100 / len(rooms), 2)) + ' %'\n for el in type_list:\n type_list.append(el.type_room)\n type_dict = {\n 'одноместный': type_list.count('одноместный'),\n 'двухместный': type_list.count('двухместный'),\n 'полулюкс': type_list.count('полулюкс'),\n 'люкс': type_list.count('люкс')\n }\n hotel.booked_single = type_dict['одноместный']\n hotel.booked_double = type_dict['двухместный'],\n hotel.booked_junior = type_dict['полулюкс']\n hotel.booked_luxury = type_dict['люкс']\n print(hotel)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"AntonKAb/Case_5_2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"5136098130","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name=\"home\"),\n path('login/', views.login, name=\"login\"),\n path('register/', views.register, name=\"register\"),\n path('dashboard/', views.dashboard, name=\"dashboard\"),\n path('profile/', views.profile, name=\"profile\"),\n path('learn/', views.learn, name=\"learn\"),\n path('course/', views.course, name=\"course\"),\n path('edit-profile/', views.edit_profile, name=\"edit_profile\"),\n path('forgotpassword/', views.forgotpassword, name=\"forgotpassword\"),\n path('blog/posts/', views.blog_details, name=\"blog_details\"),\n path('jobs/center', views.jobcenter, name=\"jobcenter\"),\n path('jobs/post', views.postjob, name=\"postjob\"),\n path('jobs/', views.job, name=\"job\"),\n path('jobs//job-application', views.apply, name=\"apply\"),\n path('blog/', views.blog, name=\"blog\"),\n path('logout/', views.logout, name=\"logout\"),\n path('addComment/', views.addComment, name=\"addComment\"),\n path('addCommentLike/', views.addCommentLike, name=\"addCommentLike\")\n\n \n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"bodegram/codedev","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"36239443615","text":"# given a sorted array\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n\n n = len(nums)\n\n i = 0\n j = n-1\n\n while i target:\n j-=1\n else:\n i+=1\n \n return [-1,-1]","repo_name":"pradeepkarthik77/DSA_Practice","sub_path":"Coding_Patterns/Two_Pointers/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"40505770165","text":"from pathlib import Path\n\nimport pytest\n\nfrom ocrmypdf_papermerge.utils import get_page_number, get_result_file_path\n\n\n@pytest.mark.parametrize(\n 'input_path,output_number',\n [\n (Path(\"/tmp/media/000001_ocr.jpeg\"), 1),\n (Path(\"/tmp/media/000023_ocr.jpeg\"), 23),\n (Path(\"/tmp/000003_ocr.png\"), 3),\n (Path(\"/tmp/000005_ocr_hocr.hocr\"), 5),\n (Path(\"/tmp/000005_ocr_hocr.txt\"), 5),\n (Path(\"/tmp/asd.45.tmp/000005_ocr_hocr.txt\"), 5),\n (Path(\"/tmp/asd.000123.tmp/000005_ocr_hocr.txt\"), 5),\n ]\n)\ndef test_get_page_number_positive_input(input_path, output_number):\n page_num_str = get_page_number(input_path)\n assert page_num_str == output_number\n\n\ndef test_get_page_number_negative_input():\n\n with pytest.raises(ValueError):\n get_page_number(10) # noqa\n\n with pytest.raises(ValueError):\n # must be at least 6 characters long\n get_page_number(\"\") # noqa\n\n with pytest.raises(ValueError):\n # must be at least 6 characters long\n get_page_number(\"1234\") # noqa\n\n with pytest.raises(ValueError):\n # did not match\n get_page_number(\"_001.png\") # noqa\n\n\ndef test_get_result_file_path_positive_input():\n # all following calls use makedirs=False argument.\n # This way `get_result_file_path` will skip validation/creation of\n # missing folder\n output_file_path = get_result_file_path(\n input_file_path=Path(\"/tmp/media/000001_ocr.png\"),\n base_dir=Path(\"/ocr/\"),\n 
uuids=['8db234f4-9579-4dd8-86c9-2564d45de1ce'],\n output_ext=\"jpeg\",\n makedirs=False\n )\n\n assert output_file_path == Path(\n \"/ocr/8d/b2/8db234f4-9579-4dd8-86c9-2564d45de1ce/page.jpeg\"\n )\n\n output_file_path = get_result_file_path(\n input_file_path=Path(\"/tmp/000002_ocr.png\"),\n base_dir=Path(\"/media/\"),\n uuids=[\n '8db234f4-9579-4dd8-86c9-2564d45de1ce',\n 'ed06dc8c-6675-47d4-ad41-1edb8c43030c' # UUID for the second page\n ],\n output_ext=\"txt\",\n makedirs=False\n )\n\n expected = Path(\n \"/media/ed/06/ed06dc8c-6675-47d4-ad41-1edb8c43030c/page.txt\"\n )\n assert output_file_path == expected\n","repo_name":"papermerge/OCRmyPDF_papermerge","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"73896292541","text":"# 연습문제 1\nnumber1 = [3.5, 3.4, 2.0, 4.6]\nfor n in range(len(number1)):\n number1[n] = round(number1[n])\n\nls = list(map(round, number1))\n\n# 연습문제 2\nls1 = [1, 2, 3, 4]\nls2 = [10, 20, 30, 40]\ndef addlist(x, y):\n lst = [x[i] + y[i] for i in range(len(x))]\n print(lst)\n return lst\n\nlst = list(map(lambda x, y: x + y, ls1, ls2))","repo_name":"JngMkk/TIL","sub_path":"BigData/Python/Python기초/06_Day6/2_def_ex.py","file_name":"2_def_ex.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"32585587025","text":"import os\n\nimport boto3\n\n\ndef lambda_handler(event, _):\n s3 = boto3.client('s3')\n bucket_name = os.environ['BUCKET_NAME']\n key = f'text/text_{event[\"execution_name\"]}'\n text = event['text']\n\n s3.put_object(\n Body=text,\n Bucket=bucket_name,\n ContentType='text/plain; charset=utf-8',\n Key=key,\n )\n\n return {\n 'bucket': bucket_name,\n 'key': key,\n 'body': f'Successfully uploaded file: {key} to bucket: {bucket_name}'\n }\n\n\nif __name__ == '__main__':\n lambda_event = {\n 'text': 'x=this is text that will be saved to s3',\n 'email': 'dev@example.com',\n 'execution_name': 'dev@example.com-57224180'\n }\n print(lambda_handler(lambda_event, None))\n","repo_name":"szmktk/tts-state-machine","sub_path":"lambdas/save_text_to_s3.py","file_name":"save_text_to_s3.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"11285822711","text":"import sys\n\nargs = sys.argv\ndeparture_station = (args[1]) # 出発駅\narrival_station = (args[2]) # 到着駅\n\nmap ={\n '東京':0, '品川':6.78, '新横浜':25.54, \n '名古屋':342.02, '京都':476.31, '新大阪':515.35 \n}\n\ntry:\n distance = map[args[2]] - map[args[1]]\n print(abs(round(distance, 2)), end=\"\")\nexcept:\n print(\"のぞみの停車駅を引数に設定してください\", end=\"\")","repo_name":"KentoHirabayashi/Team_H_Python","sub_path":"Python_H/SuzukiRika00/distanceeerror.py","file_name":"distanceeerror.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"26847015502","text":"from abc import ABC, abstractmethod\n\nFEATURE_METADATA_TABULATION = ' '\nHYDROGEN_BOND_SIGN = '|'\nFASTA_EXTENSION = '.fasta'\nGENBANK_EXTENSION = '.gb'\n\n\nclass AnnotationCreator:\n @staticmethod\n def check_format_and_parse(file_path: str):\n \"\"\"Check file format and conduct appropriate parsing\"\"\"\n if file_path.endswith(FASTA_EXTENSION):\n fasta_file = FastaParser().parse_file_content(file_path)\n return fasta_file\n elif 
file_path.endswith(GENBANK_EXTENSION):\n gb_file = GenBankParser().parse_file_content(file_path)\n return gb_file\n else:\n print('Unsupported format. Possible formats: .fasta or .gb')\n\n\nclass AbstractParser(ABC):\n @abstractmethod\n def parse_file_content(self, file_path: str):\n pass\n\n\nclass FastaParser(AbstractParser):\n def parse_file_content(self, file_path: str):\n \"\"\"Parse FASTA file content and create AnnotationFasta object\"\"\"\n lines = []\n with open(file_path, 'r') as fasta_file:\n for line in fasta_file:\n line = line.replace('\\n', '')\n lines.append(line)\n description_dna = lines[0].split(' ', maxsplit=1)\n dna_identifier = description_dna[0].replace('>', '')\n dna_size = description_dna[1].replace('(', '').replace(')', '')\n forward_strand = ''.join(lines[1:])\n return AnnotationFasta(forward_strand, dna_identifier, dna_size)\n\n\nclass GenBankParser(AbstractParser):\n def parse_file_content(self, file_path: str):\n \"\"\"Parse GenBank file content and create AnnotationGenBank object\"\"\"\n with open(file_path, 'r') as gb_file:\n file_lines = []\n for line in gb_file: # parse file to get only FEATURES lines\n file_lines.append(line)\n title_list = file_lines[0].split()\n\n locus = title_list[0]\n sequence_identifier = title_list[1]\n sequence_size = ' '.join(title_list[2:4])\n type_of_sequence = title_list[4]\n topology = title_list[5]\n gen_bank_division = title_list[6]\n modification_date = title_list[7]\n dna_features = self.describe_dna_features(file_lines)\n dna_sequence = self.describe_dna_sequence(file_lines)\n return AnnotationGenBank(dna_sequence, sequence_identifier, sequence_size, locus,\n type_of_sequence, topology, gen_bank_division, modification_date, dna_features)\n\n def describe_dna_features(self, file_lines):\n \"\"\"Parse feature part of GenBank file and create list of features\"\"\"\n list_of_features = []\n start = None\n end = None\n for line in file_lines:\n if 'FEATURES' in line: # it starts in line which contains FEATURES\n start = file_lines.index(line) + 1\n if 'ORIGIN' in line: # it ends in line which contains ORIGIN\n end = file_lines.index(line)\n if start is not None and end is not None:\n feature_lines = [e for e in file_lines[start:end]]\n feature_name = ''\n metadata_for_feature = {}\n for line in feature_lines:\n if line.startswith(FEATURE_METADATA_TABULATION):\n if '=' not in line: # it means than current line is continue of previous feature\n previous_value = metadata_for_feature[list(metadata_for_feature)[-1]]\n current_value = f'{previous_value}{line.strip()}'\n metadata_for_feature[list(metadata_for_feature)[-1]] = current_value.replace('\\n', '').replace(\"<\", \"\").replace('\"', '')\n else: # it means than it's a new feature\n k, v = line.split('=')\n k = k.replace('/', '').strip()\n v = v.replace('\\n', '').replace(\"<\", \"\").replace('\"', '')\n metadata_for_feature[k] = v\n else:\n if feature_name != '': # it means than it's first iteration, when feature isn't parsed yet\n feature = Feature(feature_name, metadata_for_feature)\n list_of_features.append(feature)\n metadata_for_feature = {} # 'nulling' metadata for new feature\n feature_first_line = line.strip().split(' ')\n feature_name = feature_first_line[0]\n place = feature_first_line[-1]\n metadata_for_feature['place'] = place\n feature = Feature(feature_name, metadata_for_feature)\n list_of_features.append(feature)\n return list_of_features\n\n def describe_dna_sequence(self, file_lines):\n \"\"\"Parse dna sequence of GenBank file\"\"\"\n\n for line in 
file_lines:\n if 'ORIGIN' in line:\n start = file_lines.index(line) + 1\n tmp_list_of_dna_sequence = [e for e in file_lines[start:]]\n forward_strand = ''.join(tmp_list_of_dna_sequence)\n forward_strand = ''.join([i.upper() for i in forward_strand if not i.isdigit()]).replace('\\n', '').replace(' ', '')\n return forward_strand\n\n\nclass Annotation(ABC):\n @abstractmethod\n def __init__(self, forward_strand_dna: str, sequence_identifier: str, sequence_size: str):\n self.size = sequence_size\n self.identifier = sequence_identifier\n self.forward_strand_dna = forward_strand_dna\n\n def __str__(self):\n return str(f'forward_strand_dna = {self.forward_strand_dna},size = {self.size}, identifier = {self.identifier}')\n\n\nclass AnnotationFasta(Annotation):\n def __init__(self, forward_strand_dna: str, sequence_identifier: str, sequence_size: str):\n Annotation.__init__(self, forward_strand_dna, sequence_identifier, sequence_size)\n\n\nclass AnnotationGenBank(Annotation):\n def __init__(self, forward_strand_dna, sequence_identifier, sequence_size, locus, type_of_sequence,\n topology, gen_bank_division, modification_date, features):\n Annotation.__init__(self, forward_strand_dna, sequence_identifier, sequence_size)\n self.locus = locus\n self.type_of_sequence = type_of_sequence\n self.topology = topology\n self.gen_bank_division = gen_bank_division\n self.modification_date = modification_date\n self.features = features\n\n\nclass Feature:\n def __init__(self, name: str, metadata: dict):\n \"\"\"Creates feature object with appropriate metadata\"\"\"\n self.name = name\n self.metadata = metadata\n\n\nclass SequenceRenderer:\n def __init__(self, dna_description: AnnotationFasta or AnnotationGenBank):\n self.dna_description = dna_description\n\n def create_forward_strand(self):\n return self.dna_description.forward_strand_dna\n\n def render_forward_strand(self):\n \"\"\"Prints to console forward strand DNA\"\"\"\n print(self.create_forward_strand())\n\n def create_reverse_strand(self):\n \"\"\"Constructs reverse strand DNA in accordance with the principle of complementarity \"\"\"\n reverse_strand_nucleotides = []\n for nucleotide in self.dna_description.forward_strand_dna:\n if nucleotide == 'A':\n reverse_strand_nucleotides.append('T')\n elif nucleotide == 'T':\n reverse_strand_nucleotides.append('A')\n elif nucleotide == 'G':\n reverse_strand_nucleotides.append('C')\n elif nucleotide == 'C':\n reverse_strand_nucleotides.append('G')\n reverse_strand_dna = ''.join(reverse_strand_nucleotides)\n return reverse_strand_dna\n\n def render_reverse_strand(self):\n \"\"\"Prints to console reverse strand DNA\"\"\"\n print(self.create_reverse_strand())\n\n def create_double_strand(self):\n \"\"\"Binds DNA complementary chains(forward and reverse)\"\"\"\n reverse_and_forward_strands = []\n dna_reverse_strand = self.create_reverse_strand()\n reverse_and_forward_strands.append(dna_reverse_strand)\n reverse_and_forward_strands.append(self.dna_description.forward_strand_dna)\n sign = '\\n' + (HYDROGEN_BOND_SIGN * len(self.dna_description.forward_strand_dna)) + '\\n'\n double_strand_dna = sign.join(reverse_and_forward_strands)\n return double_strand_dna\n\n def render_double_strand(self):\n \"\"\"Prints to console double strand DNA\"\"\"\n print(self.create_double_strand())\n\n\nclass FastaSaver:\n\n @staticmethod\n def save_dna_to_fasta(file_path: str, dna: AnnotationFasta):\n \"\"\"Saves DNA and sequence description to provided file\"\"\"\n with open(file_path, 'a') as file:\n file.write(f'>{dna.identifier} 
({dna.size})\\n{dna.forward_strand_dna}')\n\n\n\n","repo_name":"AlinaZabavkina/GenBank","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":8759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"16273854832","text":"''' Пользователь вводит текст. Вывести самое длинное слово и наиболее \nчасто встречаемое слово. '''\n\ncounter = {}\ntext = input().split()\nfor word in text:\n counter[word] = counter.get(word, 0) + 1\nn = max(counter.values())\nlst = []\nfor key, val in counter.items():\n if val == n:\n lst.append(key)\nprint('Самое(ые) наиболее встречаемое(ые) слово(а):', \", \".join(lst))\n\ncounter1 = {}\nfor word in text:\n counter1[word] = counter1.get(word, len(word))\nn1 = max(counter1.values())\nlst1 = []\nfor key, val in counter1.items():\n if val == n1:\n lst1.append(key)\nprint(' Самое(ые) длинное(ые) слово(а):', \", \".join(lst1))\n\n","repo_name":"Mathematicienne/repozitorii","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"10002028378","text":"import numpy as np\nimport sys\nimport pandas\nfrom cmf import matrix_factorization\n\nargs=list(sys.argv)\ntest_input =args[2]\ntrain_input =args[1]\nsteps = int(args[3])\n\ntrain = pandas.read_csv(train_input).sort_values(by=['user_id','movie_id'])\ntest = pandas.read_csv(test_input).sort_values(by=['user_id','movie_id'])\nusers = train['user_id'].astype(int)\nmovies = train['movie_id'].astype(int)\nratings = train['rating']\ntest_users = test['user_id'].astype(int)\ntest_movies = test['movie_id'].astype(int)\ntest_ratings = test['rating']\n\nmatrix_factorization(users, movies, ratings, test_users, test_movies, test_ratings, K=30, steps=steps, alpha=0.0002, beta=0.01, delta=0.01)\n \n","repo_name":"17mcpc14/blockgmf","sub_path":"src/main_cmf.py","file_name":"main_cmf.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"73403395903","text":"\"\"\"\nPyRPM\n=====\n\nPyRPM is a pure python, simple to use, module to read information from a RPM file.\n\"\"\"\n\nfrom collections import namedtuple\nimport hashlib\nimport re\nimport stat\nimport struct\nimport sys\n\nif sys.version < '3':\n try:\n from io import StringIO as BytesIO\n except ImportError:\n from io import StringIO as BytesIO\nelse:\n from io import BytesIO\n\n\nclass Entry(object):\n \"\"\" RPM Header Entry \"\"\"\n\n # noinspection PyShadowingBuiltins\n def __init__(self, entry=None, store=None, tag=None, type=None, value=None):\n # noinspection PyPep8Naming\n DECODING_MAP = {\n 0: self._read_null,\n 1: self._read_char,\n 2: self._read_int8,\n 3: self._read_int16,\n 4: self._read_int32,\n 5: self._read_int64,\n 6: self._read_string,\n 7: self._read_binary,\n 8: self._read_string_array,\n 9: self._read_string,\n }\n\n # read from file if possible\n if entry is not None and store is not None:\n # seek to position in store\n store.seek(entry[2])\n\n # decode information\n self.entry = entry\n self.tag = entry[0]\n self.type = entry[1]\n self.value = DECODING_MAP[entry[1]](store, entry[3])\n else:\n self.tag = tag\n self.type = type\n self.value = value\n\n def __str__(self):\n return \"(%s, %s)\" % (self.tag, self.value, )\n\n def __repr__(self):\n return \"(%s, %s)\" % (self.tag, self.value, )\n\n @staticmethod\n def _read_format(fmt, store):\n size = 
struct.calcsize(fmt)\n data = store.read(size)\n unpacked_data = struct.unpack(fmt, data)\n return unpacked_data[0] if len(unpacked_data) == 1 else unpacked_data\n\n # noinspection PyUnusedLocal\n @staticmethod\n def _read_null(store, data_count):\n return None\n\n def _read_char(self, store, data_count):\n \"\"\" store is a pointer to the store offset\n where the char should be read\n \"\"\"\n return self._read_format('!{0:d}s'.format(data_count), store)\n\n def _read_int8(self, store, data_count):\n \"\"\" int8 = 1byte\n \"\"\"\n return int(self._read_char(store, data_count))\n\n def _read_int16(self, store, data_count):\n \"\"\" int16 = 2bytes\n \"\"\"\n return self._read_format('!{0:d}h'.format(data_count), store)\n\n def _read_int32(self, store, data_count):\n \"\"\" int32 = 4bytes\n \"\"\"\n return self._read_format('!{0:d}i'.format(data_count), store)\n\n def _read_int64(self, store, data_count):\n \"\"\" int64 = 8bytes\n \"\"\"\n return self._read_format('!{0:d}q'.format(data_count), store)\n\n def _read_string(self, store, data_count):\n \"\"\" read a string entry\n \"\"\"\n string = b''\n while True:\n char = self._read_char(store, 1)\n if char == b'\\x00': # read until '\\0'\n break\n string += char\n return string.decode('utf-8')\n\n def _read_string_array(self, store, data_count):\n \"\"\" read a array of string entries\n \"\"\"\n return [self._read_string(store, 1) for i in range(data_count)]\n\n def _read_binary(self, store, data_count):\n \"\"\" read a binary entry\n \"\"\"\n return self._read_format('!{0:d}s'.format(data_count), store)\n\n\n# noinspection PyBroadException\nclass HeaderBase(object):\n\n \"\"\" RPM Header Structure \"\"\"\n MAGIC_NUMBER = b'\\x8e\\xad\\xe8'\n MAGIC_NUMBER_MATCHER = re.compile(b'(\\x8e\\xad\\xe8)')\n\n TAGS = {}\n\n def __init__(self, file):\n \"\"\" read a RPM header structure with all its entries\n\n Header format:\n [3bytes][1byte][4bytes][4bytes][4bytes]\n MN VER UNUSED IDXNUM STSIZE\n\n Entry format:\n [4bytes][4bytes][4bytes][4bytes]\n TAG TYPE OFFSET COUNT\n \"\"\"\n self.entries = []\n\n # read from file if possible\n if file:\n # read and check header\n start = file.tell()\n header = struct.unpack('!3sc4sll', file.read(16))\n if header[0] != self.MAGIC_NUMBER:\n raise RPMError('invalid RPM header')\n\n # read entries and store\n entries = [file.read(16) for i in range(header[3])]\n store = BytesIO(file.read(header[4]))\n\n # parse entries\n for entry in entries:\n parsed_entry = struct.unpack(\"!4l\", entry)\n object_entry = Entry(parsed_entry, store)\n\n if object_entry:\n self.entries.append(object_entry)\n end = file.tell()\n self.header_range = (start, end)\n\n def __getattr__(self, name):\n if name in self.TAGS:\n id_, default = self.TAGS[name]\n try:\n return self[id_]\n except:\n return default\n\n raise AttributeError(name)\n\n def __iter__(self):\n for entry in self.entries:\n yield entry\n\n def __getitem__(self, item):\n for entry in self:\n if entry.tag == item:\n return entry.value\n raise KeyError()\n\n\n# signature header section\nclass Signature(HeaderBase):\n TAGS = {\n 'size': (1000, -1),\n 'pgp': (1002, \"\"),\n 'md5': (1004, \"\"),\n 'gpg': (1005, \"\"),\n 'pgp5': (1006, \"\"),\n 'payload_size': (1007, -1),\n }\n\n\n# primary header section\nclass Header(HeaderBase):\n TAGS = {\n 'name': (1000, \"\"),\n 'version': (1001, \"0.1\"),\n 'release': (1002, \"\"),\n 'epoch': (1003, 0),\n 'summary': (1004, \"\"),\n 'description': (1005, \"\"),\n 'build_time': (1006, 0),\n 'build_host': (1007, \"\"),\n 'size': (1009, 0),\n 
'vendor': (1011, \"\"),\n 'license': (1014, \"\"),\n 'packager': (1015, \"\"),\n 'group': (1016, []),\n 'url': (1020, \"\"),\n 'architecture': (1022, \"\"),\n 'source_rpm': (1044, \"\"),\n 'archive_size': (1046, 0),\n 'provides': (1047, []),\n 'requires': (1049, []),\n 'conflicts': (1054, []),\n 'platform': (1132, \"\"),\n }\n\n\nclass RPMError(BaseException):\n pass\n\n\nRPMFile = namedtuple(\"RPMFile\", ['name', 'size', 'mode', 'rdevice', 'device', 'time', 'digest', 'link_to',\n 'flags', 'username', 'group', 'verify_flags', 'language', 'inode', 'color', 'content_class', 'type', 'primary'])\nRPMChangeLog = namedtuple(\"RPMChangeLog\", ['name', 'text', 'time'])\nRPMprco = namedtuple(\"RPMprco\", ['name', 'version', 'flags', 'str_flags'])\n\n\n# noinspection PyPep8Naming,PyBroadException\nclass RPM(object):\n RPM_LEAD_MAGIC_NUMBER = b'\\xed\\xab\\xee\\xdb'\n RPM_PRCO_FLAGS_MAP = {0: None, 2: 'LT', 4: 'GT', 8: 'EQ', 10: 'LE', 12: 'GE'}\n\n def __init__(self, rpm):\n \"\"\" rpm - StringIO.StringIO/io.BytesIO | file\n \"\"\"\n if hasattr(rpm, 'read'): # if it walk like a duck..\n self.rpmfile = rpm\n else:\n raise ValueError('invalid initialization: StringIO/BytesIO or file expected received %s' % (type(rpm), ))\n\n self.binary = None\n self.source = None\n self.header = None\n self.signature = None\n self.filelist = []\n self.changelog = []\n\n self.provides = []\n self.requires = []\n self.obsoletes = []\n self.conflicts = []\n\n self._read_lead()\n self._read_signature()\n self._read_header()\n self._match_composite()\n self._compute_checksum()\n\n @property\n def canonical_filename(self):\n if self.header.epoch == 0:\n return \"%s-%s-%s.%s.rpm\" % (self.header.name, self.header.version, self.header.release, self.header.architecture if self.binary else \"src\")\n else:\n return \"%s-%s-%s-%d.%s.rpm\" % (self.header.name, self.header.version, self.header.release, self.header.epoch, self.header.architecture if self.binary else \"src\")\n\n def _read_lead(self):\n \"\"\" reads the rpm lead section\n\n struct rpmlead {\n unsigned char magic[4];\n unsigned char major, minor;\n short type;\n short archnum;\n char name[66];\n short osnum;\n short signature_type;\n char reserved[16];\n } ;\n \"\"\"\n lead_fmt = '!4sBBhh66shh16s'\n data = self.rpmfile.read(96)\n value = struct.unpack(lead_fmt, data)\n\n magic_num = value[0]\n ptype = value[3]\n\n if magic_num != self.RPM_LEAD_MAGIC_NUMBER:\n raise RPMError('wrong magic number this is not a RPM file')\n\n if ptype == 1:\n self.binary = False\n self.source = True\n elif ptype == 0:\n self.binary = True\n self.source = False\n else:\n raise RPMError('wrong package type this is not a RPM file')\n\n def _read_signature(self):\n \"\"\" read signature header \"\"\"\n\n # find the start of the header\n if not self._find_magic_number():\n raise RPMError('invalid RPM file, signature area not found')\n\n # consume signature area\n self.signature = Signature(self.rpmfile)\n\n def _read_header(self):\n \"\"\" read information header \"\"\"\n\n # find the start of the header\n if not self._find_magic_number():\n raise RPMError('invalid RPM file, header not found')\n\n # consume header area\n self.header = Header(self.rpmfile)\n\n def _find_magic_number(self):\n \"\"\" find a magic number in a buffer\n \"\"\"\n string = self.rpmfile.read(1)\n while True:\n match = HeaderBase.MAGIC_NUMBER_MATCHER.search(string)\n if match:\n self.rpmfile.seek(-3, 1)\n return True\n byte = self.rpmfile.read(1)\n if not byte:\n return False\n else:\n string += byte\n return False\n\n 
def _match_composite(self):\n # files\n try:\n for idx, name in enumerate(self.header[1117]):\n dirname = self.header[1118][self.header[1116][idx]]\n self.filelist.append(RPMFile(\n name=dirname + name,\n size=self.header[1028][idx],\n mode=self.header[1030][idx],\n rdevice=self.header[1033][idx],\n time=self.header[1034][idx],\n digest=self.header[1035][idx],\n link_to=self.header[1036][idx],\n flags=self.header[1037][idx],\n username=self.header[1039][idx],\n group=self.header[1040][idx],\n verify_flags=self.header[1045][idx],\n device=self.header[1095][idx],\n inode=self.header[1096][idx],\n language=self.header[1097][idx],\n color=self.header[1140][idx] if 1140 in self.header else None,\n content_class=self.header[1142][self.header[1141][idx]] if 1142 in self.header and 1141 in self.header else None,\n type='dir' if stat.S_ISDIR(self.header[1030][idx] & 65535) else ('ghost' if (self.header[1037][idx] & 64) else 'file'),\n primary=('bin/' in dirname or dirname.startswith('/etc/'))))\n except:\n pass\n\n # change log\n try:\n if self.header[1081]:\n for name, time, text in zip(self.header[1081], self.header[1080], self.header[1082]):\n self.changelog.append(RPMChangeLog(name=name, time=time, text=text))\n except:\n pass\n\n # provides\n try:\n if self.header[1047]:\n for name, flags, version in zip(self.header[1047], self.header[1112], self.header[1113]):\n self.provides.append(\n RPMprco(name=name, flags=flags, str_flags=self.RPM_PRCO_FLAGS_MAP[flags & 0xf], version=self._stringToVersion(version)))\n except:\n pass\n\n # requires\n try:\n if self.header[1049]:\n for name, flags, version in zip(self.header[1049], self.header[1048], self.header[1050]):\n self.requires.append(\n RPMprco(name=name, flags=flags, str_flags=self.RPM_PRCO_FLAGS_MAP[flags & 0xf], version=self._stringToVersion(version)))\n except:\n pass\n\n # obsoletes\n try:\n if self.header[1090]:\n for name, flags, version in zip(self.header[1090], self.header[1114], self.header[1115]):\n self.obsoletes.append(\n RPMprco(name=name, flags=flags, str_flags=self.RPM_PRCO_FLAGS_MAP[flags & 0xf], version=self._stringToVersion(version)))\n except:\n pass\n\n # conflicts\n try:\n if self.header[1054]:\n for name, flags, version in zip(self.header[1054], self.header[1053], self.header[1055]):\n self.conflicts.append(\n RPMprco(name=name, flags=flags, str_flags=self.RPM_PRCO_FLAGS_MAP[flags & 0xf], version=self._stringToVersion(version)))\n except:\n pass\n\n def _compute_checksum(self):\n self.rpmfile.seek(0)\n m = hashlib.sha256()\n size = 0\n data = self.rpmfile.read()\n while data:\n size += len(data)\n m.update(data)\n data = self.rpmfile.read()\n self.filesize = size\n self.checksum = m.hexdigest()\n\n def _stringToVersion(self, verstring):\n if verstring in [None, '']:\n return None, None, None\n i = verstring.find(':')\n if i != -1:\n try:\n epoch = str(int(verstring[:i]))\n except ValueError:\n # look, garbage in the epoch field, how fun, kill it\n epoch = '0' # this is our fallback, deal\n else:\n epoch = '0'\n j = verstring.find('-')\n if j != -1:\n if verstring[i + 1:j] == '':\n version = None\n else:\n version = verstring[i + 1:j]\n release = verstring[j + 1:]\n else:\n if verstring[i + 1:] == '':\n version = None\n else:\n version = verstring[i + 1:]\n release = None\n return epoch, version, 
release\n","repo_name":"d9pouces/Moneta","sub_path":"moneta/repositories/rpm.py","file_name":"rpm.py","file_ext":"py","file_size_in_byte":14539,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"24"} +{"seq_id":"12783310830","text":"import pickle\r\nimport streamlit as st\r\n\r\n# membaca model\r\nHR_model = pickle.load(open('klasifikasi_HR.sav', 'rb'))\r\n\r\n#judul web\r\nst.title('Prediksi Interview Kandidat ')\r\n\r\n#membagi kolom\r\ncol1, col2 = st.columns(2)\r\n\r\nwith col1 :\r\n years_of_experience = st.number_input ('Jumlah tahun pengalaman yang dimiliki kandidat di bidangnya')\r\n functional_competency_score = st.number_input ('Skor yang mewakili kompetensi fungsional kandidat berdasarkan tes')\r\n top1_skills_score = st.number_input ('Skor keterampilan paling berharga yang dimiliki kandidat')\r\n top2_skills_score = st.number_input ('Skor keterampilan paling berharga kedua yang dimiliki kandidat')\r\n top3_skills_score = st.number_input ('Skor keterampilan paling berharga ketiga yang dimiliki kandidat')\r\n\r\nwith col2 :\r\n behavior_competency_score = st.number_input ('Skor yang mewakili kompetensi perilaku kandidat yang diperoleh dari tes SDM')\r\n top1_behavior_skill_score = st.number_input ('Skor keterampilan perilaku paling berharga yang dimiliki kandidat')\r\n top2_behavior_skill_score = st.number_input ('Skor keterampilan perilaku paling berharga kedua yang dimiliki kandidat')\r\n top3_behavior_skill_score = st.number_input ('Skor dari keterampilan perilaku paling berharga ketiga yang dimiliki kandidat')\r\n\r\n# code untuk prediksi\r\nHR_predict = ''\r\n\r\n# membuat tombol untuk prediksi\r\nif st.button('Test Prediksi Interview'):\r\n HR_prediction = HR_model.predict([[years_of_experience, functional_competency_score,\r\n top1_skills_score, top2_skills_score, top3_skills_score,\r\n behavior_competency_score, top1_behavior_skill_score,\r\n top2_behavior_skill_score, top3_behavior_skill_score]])\r\n\r\n if(HR_prediction[0] == 1):\r\n HR_predict = 'Kandidat Dipanggil Untuk Interview'\r\n else:\r\n HR_predict = 'Kandidat Tidak Dipanggil Untuk Interview'\r\nst.success(HR_predict)\r\n","repo_name":"Aldaniapriadi/klasifikasi-interview","sub_path":"klasifikasi_HR.py","file_name":"klasifikasi_HR.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"21608848344","text":"from pyspark.sql import SparkSession\nimport matplotlib.pyplot as plt\n\n\n# Run a SQL query on a Spark DataFrame and return the result of the query\ndef query_transform():\n spark = SparkSession.builder.appName(\"Query\").getOrCreate()\n query = \"\"\"\n SELECT p1.Region, p1.Country Name, p1.2015, p1.2016, p1.2017, p1.2018, p2.2019, p2.2020, p2.2021, p2.2022, AVG(p2.2022) OVER(PARTITION BY p1.Region) as avg_2022\n FROM popu_1_delta p1\n JOIN popu_2_delta p2 ON p1.Country Name = p2.Country Name\n ORDER BY avg_2022 DESC, p1.Country Name\n \"\"\"\n query_result = spark.sql(query)\n return query_result\n\n\n# sample viz for project\ndef viz_1():\n query = query_transform()\n count = query.count()\n if count > 0:\n print(f\"Data validation passed. {count} rows available.\")\n else:\n print(\"No data available. 
Please double check.\")\n\n query_result = query.select(\"Region\", \"avg_2022\").toPandas()\n\n # Group by 'Region' and calculate the mean of 'avg_year_2022'\n grouped_data = query_result.groupby(\"Region\")[\"avg_2022\"].mean().reset_index()\n\n # Create a bar chart with the grouped data\n grouped_data.plot(kind=\"bar\", x=\"Region\", y=\"avg_2022\", color=\"cyan\")\n\n # Set labels and title\n plt.xlabel(\"Region\")\n plt.ylabel(\"Average Population in 2022\")\n plt.title(\"Average Population in 2022 by Region\")\n\n # Show the plot\n plt.show(\"Avg_2022_Region.png\")\n\n query_result = query.select(\"Region\", \"avg_2022\").toPandas()\n\n\ndef viz_2():\n spark = SparkSession.builder.appName(\"Query\").getOrCreate()\n # Write a SQL query to select rows where 'Country' is 'United States' or 'China'\n query = \"\"\"\n SELECT Country Name, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 \n FROM popu_1_delta\n WHERE Country IN ('United States', 'China', 'Japan')\n \"\"\"\n\n # Execute the SQL query\n query_result = spark.sql(query)\n\n # Convert the result to a Pandas DataFrame for further operations or visualization\n popu_us_china = query_result.toPandas().set_index(\"Country Name\")\n\n # Transposing the DataFrame to have years as the X-axis and wages as the Y-axis\n popu_transposed = popu_us_china.transpose()\n\n # Plotting the line graph\n popu_transposed.plot(kind=\"line\", marker=\"o\")\n\n # Setting labels and title\n plt.xlabel(\"Year\")\n plt.ylabel(\"Population\")\n plt.title(\"Development of Population Over the Years for the US, China and Japan\")\n\n # Display the legend\n plt.legend(title=\"Country\")\n\n # Show the plot\n plt.show(\"popu_us_china\")\n\n\nif __name__ == \"__main__\":\n query_transform()\n viz_1()\n viz_2()\n","repo_name":"nogibjj/Alicia_individual3","sub_path":"mylib/query_viz.py","file_name":"query_viz.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9332409450","text":"class Course:\n\n def __init__(self, ident: str, name: str, kind: str, group: str, time: str, frequency: str, time_period: str,\n room_info: str, docent: str, status: str, warning: str):\n self.id = ident\n self.name = name\n self.kind = kind\n self.group = group\n self.time = time\n self.frequency = frequency\n self.time_period = time_period\n self.room_info = room_info\n self.docent = docent\n self.status = status\n self.warning = warning\n\n def __str__(self):\n course_str = f\"\\tID: {self.id}\\n\\tName: {self.name}\\n\\tTyp: {self.kind}\\n\\t\" \\\n f\"Parralelgruppe: {self.group}\\n\\tZeit: {self.time}\\n\\t\" \\\n f\"Frequenz: {self.frequency}\\n\\tZeitraum: {self.time_period}\\n\\t\" \\\n f\"Rauminfo: {self.room_info}\\n\\tDozent/in: {self.docent}\\n\\t\" \\\n f\"Status: {self.status}\"\n if self.warning is not \"None\":\n course_str += f\"\\n\\tWarnung: {self.warning}\"\n return course_str\n","repo_name":"ElCap1tan/horstl_wrapper","sub_path":"horstl_wrapper/models/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"24"} +{"seq_id":"74425285823","text":"import cPickle as pickle\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nplt.ion()\nsns.set_context('paper', font_scale=1.4)\nA4_WIDTH = 8.27\n\nwith open('data/processed/'\n '100_mc_KaggleDR_test_BayesJFnet17_392bea6.pkl', 'rb') as h:\n pred_kaggle_1 = pickle.load(h)\nwith open('data/processed/'\n 
'100_mc_imagenet_val_BayesJFnet17_392bea6.pkl', 'rb') as h:\n pred_imagenet_1 = pickle.load(h)\nwith open('data/processed/'\n '100_mc_KaggleDR_test_bcnn2_b69aadd.pkl', 'rb') as h:\n pred_kaggle_2 = pickle.load(h)\nwith open('data/processed/'\n '100_mc_imagenet_val_BayesianJFnet17_onset2_b69aadd.pkl', 'rb') as h:\n pred_imagenet_2 = pickle.load(h)\n\npred_std_kaggle_1 = pred_kaggle_1['stoch_out'].std(axis=-1)[:, 1]\npred_std_imagenet_1 = pred_imagenet_1['stoch_out'].std(axis=-1)[:, 1]\npred_std_kaggle_2 = pred_kaggle_2['stoch_out'].std(axis=-1)[:, 1]\npred_std_imagenet_2 = pred_imagenet_2['stoch_out'].std(axis=-1)[:, 1]\n\nplt.figure(figsize=(A4_WIDTH, A4_WIDTH // 2))\n\nplt.subplot(121)\nplt.title('(a) DNN for disease onset 1')\nsns.kdeplot(pred_std_kaggle_1, shade=True, label='FUNDUS images')\nsns.kdeplot(pred_std_imagenet_1, shade=True, label='non-FUNDUS images')\nplt.xlabel('model uncertainty')\nplt.ylabel('density')\nplt.legend(loc='upper right')\n\nplt.subplot(122)\nplt.title('(b) DNN for disease onset 2')\nsns.kdeplot(pred_std_kaggle_2, shade=True, label='FUNDUS images')\nsns.kdeplot(pred_std_imagenet_2, shade=True, label='non-FUNDUS images')\nplt.xlabel('model uncertainty')\nplt.legend(loc='upper right')\n","repo_name":"chleibig/disease-detection","sub_path":"scripts/confounding_images.py","file_name":"confounding_images.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"24"} +{"seq_id":"43909207","text":"import json\n\nfrom ISO_model.scripts.generators.hutn_generator import HutnGenerator\nfrom ISO_model.scripts.parsers.ijson_parser import IJsonParser\n\n\ndef class_name_to_hutn_prefix(class_name):\n cln_capitals = ''.join(filter(str.isupper, class_name))\n if len(cln_capitals) >= 2:\n return cln_capitals.lower()\n else:\n return class_name[:2].lower()\n\n\nclass IsoModelHutnGenerator(HutnGenerator):\n def __init__(self, clauses: IJsonParser, work_products: IJsonParser, other_inputs: IJsonParser):\n super(IsoModelHutnGenerator, self).__init__()\n self.requirements = {}\n self.clauses = clauses\n self.work_products = work_products\n self.other_inputs = other_inputs\n\n def load(self, iso_json):\n self.requirements.update(json.load(open(iso_json)))\n\n def generate(self, outfile):\n self.out = open(outfile, 'w+')\n self.clauses.validate()\n self.work_products.validate()\n self.other_inputs.validate()\n\n self.print_package('iso_model')\n self.print_block('iso_model')\n\n # Requirements\n for r_id, r in sorted(self.requirements.items()):\n if any((r['annotations'].get(a, False) for a\n in ('ignore', 'work_product'))):\n continue\n r['name'] = r['title']\n r.setdefault('id', r_id)\n self.inst_with_id('IsoRequirement', r, ('id', 'name'))\n\n self.print_model_instances(self.clauses)\n self.print_model_instances(self.work_products)\n\n self._finish()\n\n def generate_complete(self, outfile):\n self.out = open(outfile, 'w+')\n self.clauses.validate()\n self.work_products.validate()\n self.other_inputs.validate()\n\n self.print_package('iso_model')\n self.print_block('iso_model')\n self.print_block('Iso26262', self.generate_iso_element)\n self._finish()\n\n def generate_iso_element(self):\n parts = [c['id'].split('-')[0]\n for c in self.clauses.get_with_id()]\n parts = list(sorted(set(parts)))\n requirements = [{'id': rid, 'name': r.get('title')}\n for rid, r in sorted(self.requirements.items())]\n\n self.print_attr_array(\n 'parts', 'Part',\n lambda p: 'p' + p,\n lambda p: self.generate_part(p),\n 
parts\n )\n self.print_attr_array(\n 'clauses', 'Clause',\n lambda c: 'cl' + c['id'],\n lambda c: self.generate_clause(c),\n self.clauses.get_with_id()\n )\n self.print_model_instances(self.work_products, 'work_products')\n self.print_model_instances(self.other_inputs, 'other_inputs')\n self.print_attr_array(\n 'requirements', 'IsoRequirement',\n lambda r: 'ir' + r['id'],\n lambda r: self.values(r, ('id', 'name')),\n requirements\n )\n\n def generate_part(self, p):\n clauses = [c for c in self.clauses.get_with_id()\n if c['id'].startswith(str(p))]\n self.print_kv('id', str(p))\n self.print_kv('clauses', [c['id'] for c in clauses], 'Clause')\n\n def generate_clause(self, c):\n requirements = [\n {'id': rid, 'name': r.get('title')}\n for rid, r in sorted(self.requirements.items())\n if rid.startswith(c['id'])]\n\n self.print_kv('id', c['id'])\n self.print_kv('name', c['name'])\n self.print_kv('requirements', [r['id'] for r in requirements], 'IsoRequirement')\n self.print_kv('work_product_input', c['work_product_input'], 'WorkProductType')\n self.print_kv('other_input', c['other_input'], 'Input')\n\n\ndef main():\n work_products = IJsonParser()\n work_products.load(r'ISO_model/work_products.json')\n clauses = IJsonParser()\n clauses.load(r'ISO_model/clauses.json')\n other_inputs = IJsonParser()\n other_inputs.load('ISO_model/other_input.json')\n\n g = IsoModelHutnGenerator(clauses, work_products, other_inputs)\n g.load('ISO_model/generated/ISO-1-text.json')\n g.load('ISO_model/generated/ISO-3-text.json')\n g.load('ISO_model/generated/ISO-8-text.json')\n g.generate_complete(r'data_models/model/iso/iso_model/generated/iso26262-complete.hutn')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"den1den/adaptive_cruise_control","sub_path":"ISO_model/scripts/generators/iso_hutn_generator.py","file_name":"iso_hutn_generator.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"24"} +{"seq_id":"71638677182","text":"import time\nimport requests\nimport asyncio\n\nBASE_URL = \"https://yukicoder.me/api/v1/\"\n\ndef calc_time(fn):\n def wrapper(*args, **kwargs):\n start = time.time()\n fn(*args, **kwargs)\n end = time.time()\n print(f\"[{fn.__name__}] elapsed time: {end - start}\")\n return\n return wrapper\n\ndef get_sync(path:str) -> dict:\n print(f\"/{path} request\")\n res = requests.get(BASE_URL + path)\n print(f\"/{path} request done\")\n return res.json()\n\n@calc_time\ndef main_sync():\n data_ls = []\n paths = [\n 'problems',\n 'languages',\n 'ranking/golfer',\n 'statistics/tags',\n 'contest/future',\n ]\n for path in paths:\n data_ls.append(get_sync(path))\n return data_ls\n\nif __name__ == \"__main__\":\n result = main_sync()\n","repo_name":"k-yamasaki-zakisan/Python-script","sub_path":"api_scraptig/asyncio/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"34527441632","text":"from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout,\n QPushButton, QTableWidget, QTableWidgetItem,\n QHeaderView, QComboBox, QLabel, QLineEdit, QGridLayout)\nfrom strings import errors\n\n\nclass TableController(QWidget):\n def __init__(self):\n super().__init__()\n self.initUi()\n\n\n def initUi(self):\n self.layout = QGridLayout()\n self.btn_add_row = QPushButton('Add row')\n self.btn_insert_row = QPushButton('Insert row')\n self.btn_insert_row.setDisabled(True)\n 
self.btn_add_row.setDisabled(True)\n\n self.layout.addWidget(self.btn_add_row, 1, 1)\n self.layout.addWidget(self.btn_insert_row, 1, 2)\n\n self.setLayout(self.layout)\n self.show()","repo_name":"sanchine/vape_parser","sub_path":"widgets/table_controller.py","file_name":"table_controller.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"33867966520","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch_ros.actions import Node\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.substitutions import LaunchConfiguration\n\n\ndef generate_launch_description():\n\n mir_description_dir = get_package_share_directory('mir_description')\n namespace = LaunchConfiguration('namespace')\n use_sim_time = LaunchConfiguration('use_sim_time', default=False)\n rviz_config_file = LaunchConfiguration('rviz_config_file')\n\n return LaunchDescription([\n\n DeclareLaunchArgument(\n name='namespace',\n default_value=''\n ),\n\n DeclareLaunchArgument(\n 'rviz_config_file',\n default_value=os.path.join(mir_description_dir, 'rviz', 'mir_visu_full.rviz'),\n description='Define rviz config to be used'\n ),\n\n Node(\n package='teleop_twist_keyboard',\n executable='teleop_twist_keyboard',\n namespace=namespace,\n prefix='xterm -e'),\n\n Node(\n package='rviz2',\n executable='rviz2',\n output={'both': 'log'},\n parameters=[{'use_sim_time': use_sim_time}],\n arguments=['-d', rviz_config_file]\n ),\n\n ])\n","repo_name":"Slangb17/traffic_management_solution","sub_path":"mir_robot/mir_description/mir_driver/launch/additions_launch.py","file_name":"additions_launch.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"23239354286","text":"\"\"\"\nHandles the score\n\"\"\"\nfrom typing import Tuple\nimport pygame\nfrom wall.static_sprite import Display\n\nclass Score(pygame.sprite.Sprite):\n font: pygame.font = None\n score: int = 0\n red: Tuple[int, int, int] = (255, 0, 0)\n black: Tuple[int, int, int] = (0, 0, 0)\n green: Tuple[int, int, int] = (0, 255, 0)\n blue: Tuple[int, int, int] = (0, 0, 255)\n display: Display = None\n height = 0\n\n def __init__(self, screen: pygame.Surface, height: int):\n super().__init__()\n pygame.font.init()\n self.font: pygame.Font = pygame.font.SysFont('Comic Sans MS', 30)\n screen_width, screen_height = pygame.display.get_surface().get_size()\n self.display = Display(screen, screen_width, screen_height)\n self.height = height\n\n def add_score(self, added_score) -> None:\n self.score += added_score\n\n def get_score_images(self):\n color_score: Tuple[int, int, int] = self.green\n if self.score < 0:\n color_score = self.red\n\n text_surface = self.font.render('Your score: ', False, self.blue)\n score_surface = self.font.render(str(self.score), False, color_score)\n return text_surface, score_surface\n\n def display_on_screen(self) -> None:\n text_surface: pygame.Surface = None\n score_surface: pygame.Surface = None\n\n text_surface, score_surface = self.get_score_images()\n whole_width: int = text_surface.get_width() + score_surface.get_width()\n left_pos_text: int = (self.display.screen_width - whole_width ) // 2\n left_pos_score = left_pos_text + text_surface.get_width()\n top_pos = (self.height // 2 - max(text_surface.get_height(), score_surface.get_height())) \n \n 
self.display.screen.blit(text_surface, (left_pos_text, top_pos))\n self.display.screen.blit(score_surface, (left_pos_score, top_pos))\n\n\n \n","repo_name":"2BlackCoffees/wall","sub_path":"wall/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"10504730190","text":"\nfrom geometry_msgs.msg import Pose, Point, Quaternion\nimport rospy\nimport numpy as np\nimport time\n\n# Import utils.py from src folder\nimport sys, os, time\nprint('/'.join(os.path.realpath(__file__).split('/')[:-2]))\nsys.path.append('/'.join(os.path.realpath(__file__).split('/')[:-2]))\n\nfrom utils import *\nfrom coppelia_sim_ros_interface.srv import AddOrEditObject, AddOrEditObjectResponse, RemoveObject, RemoveObjectResponse, GripperControl, GripperControlResponse\nfrom coppelia_sim_ros_interface.msg import ObjectInfo\nfrom sensor_msgs.msg import JointState, Image\n\nfrom coppelia_sim_ros_client import CoppeliaROSInterface\n\nrospy.init_node(\"coppeliaSimPublisherTopic\", anonymous=True)\n\n# Prepare scene\npose = Pose()\npose.position = Point(0.4, 0.0, 0.0)\npose.orientation = Quaternion(0.7071067811865476, 0.7071067811865476, 0.0, 0.0)\nCoppeliaROSInterface.add_or_edit_object(name=\"object1\",pose=pose, shape='sphere', color='r', dynamic='false', size=[0.1,0.1,0.1], collision='false')\n\nCoppeliaROSInterface.add_or_edit_object(name='camera', pub_info='true')\n\nsim = CoppeliaROSInterface()\n\nsim.gripper_control(0.5)\ntime.sleep(1)\nsim.gripper_control(0.5)\ntime.sleep(1)\nsim.gripper_control(0.5)\ntime.sleep(1)\nsim.gripper_control(0.5)\ntime.sleep(1)\nsim.gripper_control(0.5)\ninput(\"1\")\nsim.gripper_control(0.0)\nsim.gripper_control(1.0)\n\ninput(\"2\")\n\nsim.gripper_control(0.0)\nsim.gripper_control(0.0)\n\ninput(\"3\")\n\nsim.gripper_control(1.0)\nsim.gripper_control(1.0)\ntime.sleep(2.0)\nsim.gripper_control(0.0)\nsim.gripper_control(0.0)\n\nexit()\n\nprint(\"OBJECT MOVING\")\npose.position = Point(0.4, 0.0, 0.02)\nsim.go_to_pose(pose, blocking=True)\nfor i in range(1000):\n t1 = time.perf_counter()\n pose.position = Point(0.4, np.cos(i/100)*0.35, np.sin(i/100)*0.35+0.36)\n sim.go_to_pose(pose, blocking=False)\n sim.gripper_control(0.0, eef_rot=np.cos(i/100)*2.0)\n CoppeliaROSInterface.add_or_edit_object(name=\"object1\",pose=pose)\n t2 = time.perf_counter()\n print(f\"tt {i}: {t2-t1}\")\n\nprint(\"DONE\")\n","repo_name":"imitrob/coppelia_sim_ros_interface","sub_path":"src/examples/control_example.py","file_name":"control_example.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9904287304","text":"import json\nimport datetime\nimport http.client\n\nimport math\nfrom steem.account import Account\n\nfrom steem import Steem\n\nimport sys\n\n# web hook URL for Discord\nurl = \"https://discordapp.com/api/webhooks/412417201216421888/Qr0EYGw7tEN6VsPL6mmx_w0DmPeP5V4YC1rN0TNitUTObX9A4SOUGWwfF5R0UxWACXcK\"\n\ndef send(message, webhook):\n \n conn = http.client.HTTPSConnection(\"discordapp.com\")\n \n payload = \"------WebKitFormBoundary7MA4YWxkTrZu0gW\\r\\nContent-Disposition: form-data; name=\\\"content\\\"\\r\\n\\r\\n\" + message + \"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW--\"\n \n headers = {\n 'content-type': \"multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW\",\n 'cache-control': \"no-cache\",\n }\n \n conn.request(\"POST\", webhook, payload, headers)\n 
\n res = conn.getresponse()\n data = res.read()\n \n print(data.decode(\"utf-8\"))\n\ns = Steem()\nsleeping = Account(\"randowhale\")['json_metadata']['config']['sleep']\n\nif sleeping:\n do=False\nelse:\n print(\"Awake!\")\n send(\"***@randowhale is awake!***\",url)\n\n\n","repo_name":"omiq/steemit-scripts","sub_path":"randowhale.py","file_name":"randowhale.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"15026816565","text":"import pytube\nimport PySimpleGUI as pg\n\n\nlayout = [\n [pg.Text('Video source'), pg.InputText(), pg.Submit()]\n]\nwindow = pg.Window('YT_downloader', layout)\nwhile True:\n event, values = window.read()\n if event in (None, 'Exit'):\n break\n if event == pg.Submit():\n print(pg.InputText())\n yt = pytube.YouTube(str(values))\n stream = yt.streams.first()\n stream.download('D:\\github\\YT_downloader')\nwindow.close() ","repo_name":"GluhovDmitry/YT_downloader","sub_path":"YT-downloader.py","file_name":"YT-downloader.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"1252988987","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 12 16:31:54 2019\n\n@author: chiilee\n\"\"\"\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport math\n\nratio=[]\n\nratiolog=[]\n\nweaken_launch=0.00\nweaken_saturated=1330 \nratio_max=20\nratio_min=0.05 \n \nmeltvol = np.arange(0,1500,1)\nfor kk in range(0,len(meltvol)):\n vis_down1 = (math.log(ratio_min)-math.log(ratio_max))*(meltvol[kk]-weaken_launch)/(weaken_saturated-weaken_launch)+math.log(ratio_max)\n vis_down2 = math.exp(vis_down1)\n ratiolog.append(vis_down1)\n #ratio.append(vis_down2)\n ratio.append(1)\n\n \n \nfig=plt.figure(figsize=(6,4))\nax = fig.add_subplot(111)\n#line = ax.plot(meltvol, ratio, '-', color='red', lw=3, label='decay rate ratio') \nline = ax.plot(meltvol, ratio, '-', color='black', lw=3, label='Orgi.') \nline = ax.plot(meltvol, r1, '-', color='red', lw=2, label='10-0.1') \nline = ax.plot(meltvol, r2, '-', color='green', lw=2, label='5-0.2') \nline = ax.plot(meltvol, r3, '-', color='blue', lw=2, label='20-0.05') \n\n\n\n\nplt.grid()\nplt.xlim(0,1500)\nplt.ylim(0,10)\nplt.xlabel('Temperature',fontsize=12, color='black')\nplt.ylabel('Decay rate ratio',fontsize=12, color='black')\n\nplt.legend(loc='upper right')\nplt.savefig('/home/chiilee/Pic/theory/'+'/pic_MeltDecay'+'.png') ","repo_name":"chiilee/python_for_flac","sub_path":"theoryplot_MeltDecay.py","file_name":"theoryplot_MeltDecay.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"74705907262","text":"#coding:UTF-8\n#__autor__='wyxces'\n\nfrom ddt import ddt, data, unpack, file_data\nimport unittest\n\n@ddt\nclass UnittestDdt(unittest.TestCase):\n @data(1)\n def test_normal(self, value):\n print('----test_normal----(value,1) ', (value,1))\n try:\n self.assertEqual(value,1)\n print(value,'= 1')\n except AssertionError as msg:\n print(msg)\n\n @data(2,3,4)\n def test_normals(self, value):\n print('----test_normals----(value,2)', (value,2))\n try:\n self.assertEqual(value,2)\n print(value, '= 2')\n except AssertionError as msg:\n print(msg)\n\n @data((1,2),(2,4))\n @unpack\n def test_tuple(self, value1, value2):\n print('----test_tuple----(value1+1, value2)',value1, '+1', value2)\n try:\n 
self.assertEqual(value1+1, value2)\n print(value1, '+1 = ', value2)\n except AssertionError as msg:\n print(msg)\n\n @data([1,2,3],[2,3,4])\n @unpack\n def test_list(self,value1, value2, value3):\n print('----test_list----(value1 + value2, value3)', value1, '+', value2, value3)\n try:\n self.assertEqual(value1 + value2, value3)\n print(value1, '+', value2 , ' = ', value3)\n except AssertionError as msg:\n print(msg)\n\n @data({'key1':1,'key2':2},{'key1':3,'key2':4})\n @unpack\n def test_dict(self, key1, key2):\n print('----test_tuple----(key1 * 2, key2)',key1, '* 2', key2)\n try:\n self.assertEqual(key1 * 2, key2)\n print(key1, '* 2 = ', key2)\n except AssertionError as msg:\n print(msg)\n\n @file_data(r'D:\\code\\myPython\\unittestAndDdt\\ddtTest.json')\n def test_json(self,json_value):\n print('----test_json----(json_value)', (json_value))\n print(json_value)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"xin00killo/myPython","sub_path":"unittestAndDdt/unittestDdt.py","file_name":"unittestDdt.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"39661306631","text":"import os\nimport gspread\nfrom google.cloud import videointelligence\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom uri import done\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'credentials.json'\n\n# Set up the scope\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n\n# Instantiate the client\ncreds = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scope)\nclient = gspread.authorize(creds)\n\n# Open the Google Spreadsheet by its name (Make sure you have shared it with the client email)\nsheet = client.open('SPREADSHEET_NAME').sheet1\n\ndef update_transcription_to_sheet(transcription):\n # Update cell B1 with the actual transcription result\n sheet.update('B1', transcription)\n\ndef videoinspector(video_urii, transcribe=True):\n # Set up the video intelligence client\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.Feature.SPEECH_TRANSCRIPTION]\n\n # Define the configurations as raw dictionaries\n config = {\"language_code\": \"en-US\", \"enable_automatic_punctuation\": True}\n video_context = {\"speech_transcription_config\": config}\n\n # Submit the video transcription request\n operation = video_client.annotate_video(\n request={\n \"features\": features,\n \"input_uri\": video_urii,\n \"video_context\": video_context,\n }\n )\n\n if transcribe:\n result = operation.result()\n\n annotation_results = result.annotation_results[0]\n transcription = \"\"\n for speech_transcription in annotation_results.speech_transcriptions:\n for alternative in speech_transcription.alternatives:\n transcription += \"Transcription: {}\\n\".format(alternative.transcript)\n transcription += \"Confidence: {}\\n\\n\".format(alternative.confidence)\n\n update_transcription_to_sheet(transcription)\n\n# Display the \"Processing. Please wait...\" message in cell B1\nsheet.update('B1', \"Processing. 
Please wait...\")\n\n# Create the video URI\nvideo_urii = done\n\n# Call the videoinspector function\nvideoinspector(video_urii, transcribe=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"igadbk/SpeechTranscription","sub_path":"done.py","file_name":"done.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"5407268871","text":"#import used\nfrom bs4 import BeautifulSoup\n\n\n# We get the title of the movie\ndef title (soup): \n title = soup.select(\"#firstHeading\")[0].text #We used select(\"#firstHeading\") function to take the title\n return title\n\n\n# In this part we get the INTRO of the movie\ndef intro(soup):\n\n try:\n sec = soup.findAll('p')[0] #to get the intro we started parsing all \"p\" in wikipedia html page\n if sec == soup.find(\"p\", class_=\"mw-empty-elt\"):\n section_intro = soup.findAll('p')[1] #In this part we go ahead in case the intro has more than one paragraph\n \n else:\n section_intro = sec\n nextNode = section_intro\n intro = [] #create a list to append all intro lines\n intro.append(nextNode.text)\n\n while True: #create a while loop to make sure you take all the paragraphs and stop when they are over.\n nextNode = nextNode.find_next_sibling()\n if nextNode and nextNode.name == 'p':\n intro.append(nextNode.text)\n\n else: \n break \n intro_s = \"\"\n\n for ele in intro: \n intro_s += ele\n \n return intro_s\n \n except IndexError:\n intro_s = None\n return intro_s\n\n# In this part we get the PLOT of the movie\ndef plot (soup):\n try: \n \n sec = soup.findAll('h2')[0] #to get the plot we started parsing all 'h2' in wikipedia html page\n if sec.text == 'Contents' or sec.text == 'Cast': #we skip all not necessary information. 
\n section_plot = soup.findAll('h2')[1] #we repeat the same opereation for different 'h2' lines\n if section_plot.text == 'Cast': \n section_plot = soup.findAll('h2')[1]\n else:\n section_plot = sec\n nextNode = section_plot.find_next_sibling('p')\n plot = [] # create a list to append plot lines\n\n while True: #create a while loop to make sure you take all the paragraphs and stop when they are over.\n if nextNode and nextNode.name == 'p':\n plot.append(nextNode.text)\n nextNode = nextNode.find_next_sibling()\n else:\n break \n plot_s = \"\"\n\n for ele in plot: \n plot_s += ele\n return plot_s\n except IndexError:\n plot_s = None \n return plot_s\n\n# In this part we get the InfoBox of the movie\ndef infobox(soup):\n try:\n table = soup.find('table', class_='infobox vevent') #We started taking information from the infobox starting from the table in html files\n nextNode = table\n table2 = table.find_all('tr') \n dic={} # create a dictionary to store all importart values\n for th in table2[1:]:\n if th.find('th'): \n dic[th.find('th').text] = th.find('td').get_text(strip=True, separator='|').split('|') #split lines \n \n standard_dic = {\n \"Directed by\" : \"\",\n \"Produced by\": \"\",\n \"Written by\": \"\",\n \"Starring\": \"\",\n \"Music by\": \"\", \n \"Release date\": \"\",\n \"Running time\": \"\",\n \"Country\": \"\",\n \"Language\": \"\",\n \"Budget\": \"\"} \n\n \n shared_items = {k: dic[k] for k in dic.keys() & standard_dic.keys()} # In this part we check if the keys of the infobox are the same as the ones requested\n\n for k, v in shared_items.items(): # We transform the list into strings\n shared_items[k] = \", \".join(v)\n\n value = { k : standard_dic[k] for k in set(standard_dic) - set(dic) } # Difference, we would like to find the missing INFO of this movie\n\n value = {k: None if not v else v for k, v in value.items() } # Replace missing INFO with NaN\n\n final = dict(shared_items, **value) # Let's combine these two dictionaries\n\n return final\n \n except AttributeError: #this except is usuful to compare all missing values in the first dictionary Infobox.\n final = {\n \"Directed by\" : None,\n \"Produced by\": None,\n \"Written by\": None,\n \"Starring\": None,\n \"Music by\": None, \n \"Release date\": None,\n \"Running time\": None,\n \"Country\": None,\n \"Language\": None,\n \"Budget\": None} \n \n return final\n","repo_name":"LucaScofano/Homework-3-ADM","sub_path":"parser_utils.py","file_name":"parser_utils.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"2590002279","text":"import sys\nimport math\n\n\ndef f(x):\n return x\n\nn = int(input())\n\nd = {}\n\nfor i in range(n):\n x = int(input())\n if x not in d:\n d[x] = f(x)\n print(d[x])\n","repo_name":"nizhikebinesi/code_problems_python","sub_path":"stepik/programming_on_python/3.2/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"24"} +{"seq_id":"34318549586","text":"# -*- coding: utf-8 -*-\n\n# Based on a script written by Sasank Chilamkurthy on BSD licence\n# Source: https://github.com/pytorch/tutorials/blob/master/beginner_source/transfer_learning_tutorial.py\n\n# The word embedding part was sourced from:\n# Secondary https://github.com/kavgan/nlp-in-practice/blob/master/pre-trained-embeddings/Pre-trained%20embeddings.ipynb\n\nfrom __future__ import print_function, division\n\nimport 
torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nimport gensim.downloader as api\nfrom gensim.models import Word2Vec\nfrom gensim.models import KeyedVectors\nfrom gensim.test.utils import get_tmpfile\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nprint(\"GPU available: %d\" % (torch.cuda.is_available()))\n\n######################################################################\n# Load Data\n# ---------\n#\n# We will use torchvision and torch.utils.data packages for loading the\n# data.\n#\n# The problem we're going to solve today is to train a model to classify\n# **ants** and **bees**. We have about 120 training images each for ants and bees.\n# There are 75 validation images for each class. Usually, this is a very\n# small dataset to generalize upon, if trained from scratch. Since we\n# are using transfer learning, we should be able to generalize reasonably\n# well.\n#\n# This dataset is a very small subset of imagenet.\n#\n# .. Note ::\n# Download the data from\n# `here `_\n# and extract it to the current directory.\n\ndef loadDataset(dataset_name = \"hymenoptera\"):\n print(\"Loading dataset \\\"%s\\\"...\" % (dataset_name))\n # Data augmentation and normalization for training\n # Just normalization for validation\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n if dataset_name == \"hymenoptera\":\n data_dir = 'data/hymenoptera_data'\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n class_names = image_datasets['train'].classes\n elif dataset_name == \"cifar-10\":\n data_dir = 'data'\n image_datasets = {x: datasets.CIFAR10(data_dir,\n train=(x=='train'),\n transform=data_transforms[x],\n download=True)\n for x in ['train', 'val']}\n\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n class_names = [\"plane\", \"car\", \"bird\", \"cat\",\n \"deer\", \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"]\n else:\n raise Error(\"Unknown dataset!\")\n\n print(\"Datasets sizes: %s\" % (str(dataset_sizes)))\n print(\"# classes = %d ('%s')\" % (len(class_names), \"', '\".join(class_names)))\n print()\n\n return dataloaders, dataset_sizes, class_names, len(class_names)\n\n######################################################################\n# Load embeddings\n\ndef loadEmbeddings(model_name = \"glove-twitter-25\", dataset_name = \"hymenoptera\"):\n print(\"Loading embedding model \\\"%s\\\"...\" % (model_name))\n\n em_path = model_name+\"-\"+dataset_name+\".kv\"\n if not os.path.isfile(em_path):\n if model_name == 
\"GoogleNews-vectors-negative300\":\n # Source: https://github.com/mmihaltz/word2vec-GoogleNews-vectors\n # Load pretrained model (since intermediate data is not included, the model cannot be refined with additional data)\n embeddings = KeyedVectors.load_word2vec_format(model_name + '.bin', binary=True).wv\n else:\n embeddings = api.load(model_name).wv\n #embeddings.save(em_path)\n class_embeddings = torch.Tensor(embeddings[class_names])\n torch.save(class_embeddings, em_path)\n else:\n #embeddings = KeyedVectors.load(em_path, mmap='r')\n class_embeddings = torch.load(em_path)\n #class_embeddings = torch.Tensor(embeddings[class_names])\n\n print(\"Embeddings shape: %s\" % (str(class_embeddings.shape)))\n print()\n\n return class_embeddings\n\n######################################################################\n# Training the model\n# ------------------\n#\n# Now, let's write a general function to train a model. Here, we will\n# illustrate:\n#\n# - Scheduling the learning rate\n# - Saving the best model\n#\n# In the following, parameter ``scheduler`` is an LR scheduler object from\n# ``torch.optim.lr_scheduler``.\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n targets = class_embeddings[labels]#.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n\n # print()\n # print(class_embeddings.shape)\n # print(targets.shape)\n # print(outputs.shape)\n # print(outputs[0].shape)\n # print(torch.nn.functional.cosine_similarity(outputs[0].unsqueeze_(0), class_embeddings, dim=1))\n\n # print()\n # print(class_embeddings.shape)\n # print(class_embeddings)\n #\n # # print()\n # # print(outputs.shape)\n # # print(targets.shape)\n #\n # print()\n # print(outputs)\n # print(targets)\n #\n # print()\n # print(outputs - targets)\n #\n # #print()\n # #dist = torch.norm(outputs[0] - targets, dim=1, p=None)\n # #print(dist)\n\n preds = torch.LongTensor([torch.norm(output - class_embeddings, dim=1, p=None).topk(1, largest=False)[1] for output in outputs])\n # preds = torch.LongTensor([torch.nn.functional.cosine_similarity(output.unsqueeze_(0), class_embeddings, dim=1).topk(1, largest=False)[1] for output in outputs])\n\n # print(torch.norm(outputs[0] - class_embeddings, dim=1, p=None))\n # print(torch.norm(outputs[1] - class_embeddings, dim=1, p=None))\n # print(torch.norm(outputs[2] - class_embeddings, dim=1, p=None))\n # print(torch.norm(outputs[3] - class_embeddings, dim=1, p=None))\n #\n # print(preds.shape)\n # print(labels.data.shape)\n\n # loss = torch.norm(outputs - targets, dim=1, p=None)\n loss = criterion(outputs, targets)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase 
== 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\n######################################################################\n# Finetuning the convnet\n# ----------------------\n#\n\n# dataset = \"hymenoptera\"\ndataset = \"cifar-10\"\n\nembeddings = \"glove-twitter-25\"\n# embeddings = \"glove-wiki-gigaword-100\"\n# embeddings = \"GoogleNews-vectors-negative300\"\n######################################################################\n\n\nprint()\n\ndataloaders, dataset_sizes, class_names, n_classes = loadDataset(dataset)\n\nclass_embeddings = loadEmbeddings(embeddings,dataset)\n\n# print(zip(class_names,class_embeddings))\n\n\nmodel_ft = models.resnet18(pretrained=True)\n\nnum_ftrs = model_ft.fc.in_features\n\n\n\nmodel_ft.fc = nn.Linear(num_ftrs, class_embeddings.shape[1])\n\n# TODO: Add a second last fully-connected layer\n# intermed = (n_classes+num_ftrs)//2\n# model_ft.fc = nn.Sequential(\n# nn.Linear(num_ftrs, intermed)\n# , embedding_layer\n# )\n\n# This allows to set the learning rate for different layers\npar_lrs = {\n 'firstlayer' : {'params' : [], 'lr': 0.0001},\n 'scnlastlayer' : {'params' : [], 'lr': 0.0001},\n 'lastlayers' : {'params' : [], 'lr': 0.001},\n 'others' : {'params' : [], 'lr': 0.0},\n}\n\nfor name,param in model_ft.named_parameters():\n print(name)\n if name.startswith(\"conv1.\"):\n par_lrs[\"firstlayer\"]['params'].append(param)\n elif name.startswith(\"layer4.1\"):\n par_lrs[\"scnlastlayer\"]['params'].append(param)\n elif name.startswith(\"fc\"):\n par_lrs[\"lastlayers\"]['params'].append(param)\n else:\n par_lrs[\"others\"]['params'].append(param)\nprint()\n\nfor i in par_lrs:\n print(i + \":\\t\" + str(len(par_lrs[i]['params'])) + '\\t' + str(par_lrs[i]['lr']))\n if par_lrs[i]['lr'] == 0.0:\n for param in par_lrs[i][\"params\"]:\n param.requires_grad = False\nprint()\n\n\nmodel_ft = model_ft.to(device)\n\n# TODO Use that geometric softmax idea!\n# criterion = nn.CrossEntropyLoss()\ncriterion = nn.MSELoss()\n# criterion = nn.CosineSimilarity()\n\n# 0. optimize only laste layer's parameter\n# optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\n# 1. optimize layers according to the settings (see a few lines above)\n# TODO: switch back to SGD\n# optimizer_ft = optim.SGD([par_lrs[i] for i in par_lrs if par_lrs[i]['lr'] != 0.0],\n# lr = 0.001,\n# momentum = 0.9\n# # , weight_decay = 0.9\n# )\noptimizer_ft = optim.Adam([par_lrs[i] for i in par_lrs if par_lrs[i]['lr'] != 0.0],\n lr = 0.001\n # , weight_decay = 0.9\n )\n\n# Decay LR by a factor of 0.1 every 7 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n######################################################################\n# Train and evaluate\n# ^^^^^^^^^^^^^^^^^^\n#\n# It should take around 15-25 min on CPU. 
On GPU though, it takes less than a\n# minute.\n#\n\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=25)\n\n######################################################################\n#\n","repo_name":"giopaglia/visual-embeddings","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":12997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"22849739966","text":"import os\nimport sys\nimport random\nimport pickle\nimport traceback\n\nimport numpy as np\nimport theano\nfrom theano import tensor as T\n\nfrom modules.sentence import tokenizer, read, detokenize\nfrom modules.text import Encoded\nfrom modules.model import NMT\nfrom modules.bleu import BLEU\nfrom modules.chrF import chrF\nfrom modules.search import beam_with_coverage\n\nfrom prompter import prompt, yesno\nfrom collections import Counter\nfrom arghandler import ArgumentHandler, subcmd\nfrom datetime import datetime\nfrom time import time\nfrom pprint import pprint\n\nfrom nltk import word_tokenize\n\nfrom app import logger as log\n\n@subcmd('initdb', help=\"Initialize Database\")\ndef initdb(parser, context, args):\n\n\tfrom app import db\n\n\tdb.create_all()\n\tlog.info('SQL database created.', 'green')\n\n@subcmd('dropdb', help='Drop Database')\ndef dropdb(parser, context, args):\n\n\tfrom app import db\n\n\tif yesno('Are you sure you want to lose all your sql data?', default='yes'):\t\n\t\tdb.drop_all()\n\t\tlog.info('SQL database has been deleted.', 'green')\n\n@subcmd('runserver', help=\"Run the Server\")\ndef runserver(parser, context, args):\n\n\tfrom app import app\n\n\tapp.run(\n\t\thost=app.config['HOST'],\n\t\tport=app.config['PORT'],\n\t\tdebug=app.config['DEBUG'])\n\n@subcmd('create-encoder', help=\"Encoder tool\")\ndef create_encoder(parser, context, args):\n\n\tparser.add_argument('--files', required=True, metavar='FILE', type=str,\n\t\tnargs='+',\n\t\thelp='File to process')\n\n\tparser.add_argument('--vocabulary-size', type=int, default=50000,\n\t\thelp='Maximum number of word in the vocabulary')\n\n\tparser.add_argument('--char-size', type=int, default=200,\n\t\thelp='Maximum number of characters in the vocabulary')\n\n\tparser.add_argument('--char-count', type=int, default=1,\n\t\thelp='Minimum count of characters in the vocabulary (all characters are\\\n\t\t\tincluded by default)')\n\n\tparser.add_argument('--tokenizer', type=str, default='word',\n\t\tchoices=('space', 'char', 'word'),\n\t\thelp='Tokenizer flag (space, char, word)')\n\n\tparser.add_argument('--lowercase', action='store_true',\n\t\thelp='Lowercase the data')\n\n\tparser.add_argument('--hybrid', action='store_true',\n\t\thelp='Create a hybrid word/character vocabulary')\n\n\tparser.add_argument('--save-to', required=True, metavar='FILE', type=str,\n\t\thelp='Output file name')\n\n\targs = parser.parse_args(args)\n\n\tfrom modules.text import TextEncoder\n\n\tif args.tokenizer == 'char':\n\t\ttokenize = lambda s: list(s.strip())\n\telif args.tokenizer == 'space' or args.tokenizer == 'bpe':\n\t\ttokenize = str.split\n\telif args.tokenizer == 'word':\n\t\timport nltk\n\t\tfrom nltk import word_tokenize as tokenize\n\n\ttoken_count = Counter()\n\tchar_count = Counter()\n\n\tcharacter = args.tokenizer == 'char' \n\n\tfor filename in args.files:\n\t\tlog.info('Processing %s' % os.path.basename(filename))\n\t\twith open(filename, 'rt') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.lower() if args.lowercase else 
line\n\t\t\t\ttokens = tokenize(line)\n\t\t\t\ttoken_count.update(tokens)\n\t\t\t\tif args.hybrid:\n\t\t\t\t\tchar_count.update(''.join(tokens))\n\t\t\tf.close()\n\n\tlog.info('Creating %s encoder' % os.path.splitext(args.save_to)[0])\n\tif args.hybrid:\n\t\tchar_encoder = TextEncoder(\n\t\t\t\tcounts = char_count,\n\t\t\t\tmin_count = char_count,\n\t\t\t\tmax_vocab = args.char_size,\n\t\t\t\tspecial=('',))\n\t\tencoder = TextEncoder(\n\t\t\t\tcounts = token_count,\n\t\t\t\tmax_vocab = args.vocabulary_size,\n\t\t\t\tsub_encoder = char_encoder)\n\telse:\n\t\tencoder = TextEncoder(\n\t\t\t\tcounts=token_count, \n\t\t\t\tmax_vocab=args.vocabulary_size,\n\t\t\t\tmin_count=char_count if args.tokenizer == 'char' else None,\n\t\t\t\tspecial=('', '') + (() if args.tokenizer == 'char' else ('',))\n\t\t\t)\n\n\tif os.path.isfile(os.path.splitext(args.save_to)[0] + \".vocab\"):\n\t\tif not yesno('Encoder already exist. Replace?', default='yes'):\n\t\t\targs.save_to = prompt('%s' % log('Enter new encoder name:'))\n\n\tlog.info('Exporting %s encoder' % os.path.basename(args.save_to))\n\twith open(os.path.splitext(args.save_to)[0] + \".vocab\", 'wb') as f:\n\t\tpickle.dump(encoder, f, -1)\n\t\tf.close()\n\n\tlog.info('Success')\n\n@subcmd('create-model', help=\"Create new model\")\ndef create_model(parser, context, args):\n\n\tparser.add_argument('--source-encoder', type=str, metavar='FILE',\n\t\tdefault=None, required=True,\n\t\thelp='load source vocabulary ')\n\n\tparser.add_argument('--target-encoder', type=str, metavar='FILE',\n\t\tdefault=None, required=True,\n\t\thelp='load target vocabulary')\n\n\tparser.add_argument('--source-tokenizer', type=str,\n\t\tchoices=('word', 'space', 'char', 'bpe'), default='word',\n\t\thelp='Type of Preprocessing source text')\n\n\tparser.add_argument('--target-tokenizer', type=str,\n\t\tchoices=('word', 'space', 'char', 'bpe'), default='char',\n\t\thelp='Type of Preprocessing target text')\n\n\tparser.add_argument('--alpha', type=float, default=0.01, metavar='X',\n\t\thelp='Length penalty weight during beam translation')\n\n\tparser.add_argument('--beta', type=float, default=0.4, metavar='X',\n\t\thelp='Coverage penalty weight during beam translation')\n\n\tparser.add_argument('--gamma', type=float, default=1.0, metavar='X',\n\t\thelp='Over attention penalty weight during beam translation')\n\n\tparser.add_argument('--decoder-gate', type=str,\n\t\tchoices=('lstm', 'context'), default='lstm',\n\t\thelp='Tyoe of decoder gate (lstm or context)')\n\n\tparser.add_argument('--len-smooth', type=float, default=5.0, metavar='X',\n\t\thelp='Smoothing constant for length penalty during beam translation')\n\n\tparser.add_argument('--word-embedding-dims', type=int, metavar='N',\n\t\tdefault=256, \n\t\thelp='Size of word embeddings')\n\n\tparser.add_argument('--target-embedding-dims', type=int, metavar='N',\n\t\tdefault=None, \n\t\thelp='Size of target embeddings (default: size of input word or char embedding')\n\n\tparser.add_argument('--char-embedding-dims', type=int, metavar='N',\n\t\tdefault=64, \n\t\thelp='Size of character embeddings')\n\n\tparser.add_argument('--dropout', type=float, metavar='FRACTION',\n\t\tdefault=0.0, \n\t\thelp='Use dropout for non-recurrent connections with the given factor')\n\n\tparser.add_argument('--encoder-state-dims', type=int, metavar='N',\n\t\tdefault=256, \n\t\thelp='Size of encoder state')\n\n\tparser.add_argument('--decoder-state-dims', type=int, metavar='N',\n\t\tdefault=512, \n\t\thelp='Size of decoder 
state')\n\n\tparser.add_argument('--attention-dims', type=int, metavar='N',\n\t\tdefault=256, \n\t\thelp='Size of attention vectors')\n\n\tparser.add_argument('--alignment-loss', type=float, metavar='X',\n\t\tdefault=0.0, \n\t\thelp='Alignment cross-entropy contribution to loss function (DEPRECATED)')\n\n\tparser.add_argument('--alignment-decay', type=float, metavar='X',\n\t\tdefault=0.9999, \n\t\thelp='Decay factor of alignment cross-entropy contribution (DEPRECATED)')\n\n\tparser.add_argument('--layer-normalization', action='store_true',\n\t\thelp='Use layer normalization')\n\n\tparser.add_argument('--recurrent-dropout', type=float, metavar='FRACTION',\n\t\tdefault=0.0, \n\t\thelp='Use dropout for recurrent connections with the given factor')\n\t\n\tparser.add_argument('--source-lowercase', action='store_true',\n\t\thelp='Convert source text to lowercase before processing')\n\n\tparser.add_argument('--target-lowercase', action='store_true',\n\t\thelp='convert target text to lowercase before processing')\n\n\tparser.add_argument('--backwards', action='store_true',\n\t\thelp='Reverse the order (token level) of all input data')\n\n\tparser.add_argument('--save-model', type=str, metavar='FILE',\n\t\tdefault=None, required=True,\n\t\thelp='Output Model file')\n\n\targs = parser.parse_args(args)\n\n\tlog.info('Loading Source language encoder')\n\twith open(args.source_encoder, 'rb') as f:\n\t\targs.source_encoder = pickle.load(f)\n\t\tf.close()\n\n\tlog.info('Loading Target language encoder')\n\twith open(args.target_encoder, 'rb') as f:\n\t\targs.target_encoder = pickle.load(f)\n\t\tf.close()\n\n\tif args.target_embedding_dims is None:\n\t\targs.target_embedding_dims = (\n\t\t\targs.char_embedding_dims\n\t\t\tif args.target_tokenizer == 'char'\n\t\t\telse args.word_embedding_dims)\n\n\tlog.info('Configuring model')\n\tconfig = {\n\t\t'ts_train': 0, \t\t\t\t\t\t\t\t\t\t\t# total training time in seconds\n\t\t'tn_epoch': 0, \t\t\t\t\t\t\t\t\t\t\t# total number of epochs\n\t\t'source_encoder': args.source_encoder,\t\t\t\t\t#\n\t\t'target_encoder': args.target_encoder,\t\t\t\t\t#\n\t\t'source_lowercase': args.source_lowercase,\t\t\t\t# False\n\t\t'source_tokenizer': args.source_tokenizer,\t\t\t\t# word\n\t\t'target_lowercase': args.target_lowercase,\t\t\t\t# False\n\t\t'target_tokenizer': args.target_tokenizer,\t\t\t\t# char\n\t\t'source_embedding_dims': args.word_embedding_dims,\t\t# 256\n\t\t'source_char_embedding_dims': args.char_embedding_dims,\t# 64\n\t\t'target_embedding_dims': args.target_embedding_dims,\t# None\n\t\t'char_embeddings_dropout': args.dropout,\t\t\t\t# 0.0\n\t\t'embeddings_dropout': args.dropout,\t\t\t\t\t\t# 0.0\n\t\t'recurrent_dropout': args.recurrent_dropout,\t\t\t# 0.0\n\t\t'dropout': args.dropout,\t\t\t\t\t\t\t\t# 0.0\n\t\t'encoder_state_dims': args.encoder_state_dims,\t\t\t# 256\n\t\t'decoder_state_dims': args.decoder_state_dims,\t\t\t# 512\n\t\t'attention_dims': args.attention_dims,\t\t\t\t\t# 256\n\t\t'layernorm': args.layer_normalization,\t\t\t\t\t# False\n\t\t'alignment_loss': args.alignment_loss,\t\t\t\t\t# 0.0\n\t\t'alignment_decay': args.alignment_decay,\t\t\t\t# 0.9999\n\t\t'backwards': args.backwards,\t\t\t\t\t\t\t# False\n\t\t'decoder_gate': args.decoder_gate,\t\t\t\t\t\t# lstm\n\t\t'alpha': args.alpha,\t\t\t\t\t\t\t\t\t# 0.01\n\t\t'beta': args.beta,\t\t\t\t\t\t\t\t\t\t# 0.4\n\t\t'gamma': args.gamma,\t\t\t\t\t\t\t\t\t# 1.0\n\t\t'decoder_gate': args.decoder_gate,\t\t\t\t\t\t# lstm\n\t\t'len_smooth': args.len_smooth,\t\t\t\t\t\t\t# 5.0\n\t\t'encoder_layernorm': 
'ba2' if args.layer_normalization else False,\n\t\t'decoder_layernorm': 'ba2' if args.layer_normalization else False\n\t}\n\n\tif not config['source_encoder'].sub_encoder:\n\t\tlog.warning('Source encoder is not hybrid')\n\n\tlog.info('Checking existence')\n\tif os.path.isfile(args.save_model):\n\t\tif not yesno(log('Model %s exist, replace? ' % os.path.basename(args.save_model)), default='yes'):\n\t\t\targs.save_model = prompt(log('New model name: '))\n\n\tlog.info('Creating model')\n\tmodel = NMT('nmt', config)\n\t\n\tlog.info('Saving %s' % os.path.basename(args.save_model))\n\twith open(args.save_model, 'wb') as f:\n\t\tpickle.dump(config, f)\n\t\tmodel.save(f)\n\t\tf.close()\n\n\tlog.info('Model Saved')\n\n@subcmd('train', help=\"Model Trainer\")\ndef train(parser, context, args):\n\n\tparser.add_argument('--load-model', type=str, metavar='FILE(s)',\n\t\tdefault=None, required=True,\n\t\thelp='Existing Model file')\n\n\tparser.add_argument('--translate-every', type=int, default=250, metavar='N',\n\t\thelp='Translate test set every N training batches')\n\n\tparser.add_argument('--train-data', type=str, metavar='FILE',\n\t\trequired=True,\n\t\thelp='Name of the Training data file')\n\n\tparser.add_argument('--source-test-data', type=str, metavar='FILE',\n\t\tdefault=None, required=True,\n\t\thelp='Name of the source test-set file')\n\n\tparser.add_argument('--target-test-data', type=str, metavar='FILE',\n\t\tdefault=None, required=True,\n\t\thelp='Name of the target test-set file')\n\n\tparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n\t\thelp='Minibatch size of training set')\n\n\tparser.add_argument('--batch-budget', type=float, default=32, metavar='N',\n\t\thelp='Minibatch budget during training. The optimal value depends on model \\\n\t\t\t\tsize and available GPU memory. 
Try values between 20 and 200')\n\n\tparser.add_argument('--reset-optimizer', action='store_true',\n\t\thelp='Reset model optimizer')\n\n\tparser.add_argument('--min-char-count', type=int, metavar='N',\n\t\thelp='Drop all characters with count < N in training data')\n\n\tparser.add_argument('--learning-rate', type=float, default=None, metavar='X',\n\t\thelp='Override the default learning rate for optimizer with X')\n\n\tparser.add_argument('--random-seed', type=int, default=123, metavar='N',\n\t\thelp='Random seed for repeatable sorting of data')\n\n\tparser.add_argument('--max-target-length', type=int, default=1000, metavar='N',\n\t\thelp='Maximum length of target sentence during translation (unit given by --target-tokenizer)')\n\n\tparser.add_argument('--reference', type=str, metavar='FILE', default=None,\n\t\thelp='Name of the reference translation file')\n\n\tparser.add_argument('--nbest-list', type=int, default=0, metavar='N',\n\t\thelp='Print n-best list in translation model')\n\n\tparser.add_argument('--beam-size', type=int, default=8, metavar='N',\n\t\thelp='Beam size during translation')\n\n\tparser.add_argument('--train-for', type=int, default=1, metavar='N',\n\t\thelp='Train for N epochs')\n\n\targs = parser.parse_args(args)\n\n\tfrom bnas.optimize import iterate_batches\n\tfrom modules.largetext import ShuffledText, HalfSortedIterator\n\n\trandom.seed(args.random_seed)\n\n\twith open(args.load_model, 'rb') as f:\n\t\tlog.info('Loading %s configuration' % os.path.basename(args.load_model))\n\t\tconfig = pickle.load(f)\n\t\tmodel = NMT('nmt', config)\n\n\t\tlog.info('Loading %s weights' % os.path.basename(args.load_model))\n\t\tmodel.load(f)\n\n\t\tlog.info('Initializing Optimizer')\n\t\toptimizer = model.create_optimizer()\n\n\t\tif args.learning_rate is not None:\n\t\t\toptimizer.learning_rate = args.learning_rate\n\n\t\tif not args.reset_optimizer:\n\t\t\ttry:\n\t\t\t\toptimizer.load(f)\n\t\t\t\tlog.info('Continuing traning from Epoch %d, Update %d' % (config['tn_epoch'], optimizer.n_updates))\n\t\t\texcept EOFError:\n\t\t\t\tpass\n\n\tlogf = open(args.load_model +\n\t\t'.log', 'a', encoding='utf-8')\n\tevalf = open(args.load_model +\n\t\t'-eval.log', 'a', encoding='utf-8')\n\n\tlog.info('Initializing Source Text Tokenizer')\n\tsource_tokenizer = tokenizer(config['source_tokenizer'],\n\t\tlowercase=config['source_lowercase'])\n\n\tlog.info('Initializing Target Text Tokenizer')\n\ttarget_tokenizer = tokenizer(config['target_tokenizer'], \n\t\tlowercase=config['target_lowercase'])\n\n\tlog.info('Loading Source Language Testing data')\n\tsource_test_data = read(args.source_test_data,\n\t\tsource_tokenizer, config['backwards'])\n\t\n\tlog.info('Loading Target Language Testing data')\n\ttarget_test_data = read(args.target_test_data,\n\t\ttarget_tokenizer, config['backwards'])\n\n\ttarget_test_unencoded = target_test_data\n\tsource_test_data = [config['source_encoder'].encode_sequence(sent)\n\t\t\t\t\t\t\tfor sent in source_test_data]\n\ttarget_test_data = [config['target_encoder'].encode_sequence(sent)\n\t\t\t\t\t\t\tfor sent in target_test_data]\n\ttest_link_maps = [(None, None, None)]*len(source_test_data)\n\ttest_pairs = list(zip(source_test_data, target_test_data, test_link_maps))\n\n\tsource_sample_data = source_test_data\n\ttarget_sample_data = target_test_unencoded\n\n\tlog.info('Loading Training Data')\n\ttraining_data = open(args.train_data, 'rb')\n\tshuffled_training_data = ShuffledText(training_data, seed=args.random_seed)\n\n\tdef validate(test_pairs, start_time, 
optimizer, logf, sent_nr):\n\t\tresult = 0.\n\t\tatt_result = 0.\n\t\tt0 = time()\n\t\tfor batch_pairs in iterate_batches(test_pairs, args.batch_size):\n\t\t\ttest_x, test_y = model.prepare_batch(batch_pairs)\n\t\t\ttest_outputs, test_outputs_mask, test_attention = test_y\n\t\t\ttest_xent, test_xent_attention = model.xent_fun(*(test_x + test_y))\n\t\t\tscale = (test_outputs.shape[1] / (test_outputs_mask.sum()*np.log(2)))\n\t\t\tresult += test_xent * scale\n\t\t\tatt_result += test_xent_attention*scale\n\n\t\tprint('%d\\t%.3f\\t%.3f\\t%.3f\\t%d\\t%d' %\n\t\t\t(\n\t\t\t\tint(t0 - start_time),\t\t# Starting Time\n\t\t\t\tresult,\t\t\t\t\t\t# Result\n\t\t\t\tatt_result,\t\t\t\t\t# Attention Result\n\t\t\t\ttime() - t0,\t\t\t\t# End Time\n\t\t\t\toptimizer.n_updates,\t\t# Number of Update\n\t\t\t\tsent_nr\t\t\t\t\t\t# Number of Sentence Processed\n\t\t\t\t), file=logf, flush=True)\n\n\t\treturn result\n\n\tepochs, batch_nr, sent_nr = 0, 0, 0\n\tchrf_max, bleu_max = 0.0, 0.0\n\n\tlog.info('Starting training')\n\tlog.info('Press Ctrl + C to stop training...')\n\ttry:\n\t\twhile epochs < args.train_for:\n\n\t\t\tstart_time = time()\n\n\t\t\ttrain_samples = HalfSortedIterator(\n\t\t\t\t\t\titer(shuffled_training_data),\n\t\t\t\t\t\tmax_area=args.batch_budget*0x200,\n\t\t\t\t\t\tsource_tokenizer=source_tokenizer,\n\t\t\t\t\t\ttarget_tokenizer=target_tokenizer,\n\t\t\t\t\t\tlength=lambda pair: sum(map(len, pair)))\n\n\t\t\tfor sent_pairs in train_samples:\n\n\t\t\t\tprint('Number of Sentences: %d' % len(sent_pairs))\n\n\t\t\t\tsource_batch, target_batch = list(zip(*sent_pairs))\n\n\t\t\t\tif config['backwards']:\n\t\t\t\t\tsource_batch = [source_sent[::-1] for source_sent in source_batch]\n\t\t\t\t\ttarget_batch = [target_sent[::-1] for target_sent in target_batch]\n\n\t\t\t\tsource_batch = [config['source_encoder'].encode_sequence(source_sent)\n\t\t\t\t\t\t\t\tfor source_sent in source_batch]\n\t\t\t\t\n\t\t\t\ttarget_batch = [config['target_encoder'].encode_sequence(target_sent)\n\t\t\t\t\t\t\t\tfor target_sent in target_batch]\n\n\t\t\t\tbatch_link_maps = [(None, None, None)]*len(source_batch)\n\n\t\t\t\tbatch_pairs = list(zip(source_batch, target_batch, batch_link_maps))\n\n\t\t\t\tsent_nr += len(batch_pairs)\n\n\t\t\t\tx, y = model.prepare_batch(batch_pairs)\n\n\t\t\t\tt0 = time()\n\t\t\t\ttrain_loss = optimizer.step(*(x + y))\n\t\t\t\ttrain_loss *= (y[0].shape[1] / (y[1].sum()*np.log(2)))\n\t\t\t\tlog.info('Batch %2d:%4d has loss %.4f (%.2f s)' % (\n\t\t\t\t\tepochs + 1,\n\t\t\t\t\toptimizer.n_updates,\n\t\t\t\t\ttrain_loss,\n\t\t\t\t\ttime() - t0))\n\t\t\t\tif np.isnan(train_loss):\n\t\t\t\t\tlog.warning('NaN loss, aborting')\n\t\t\t\t\tsys.exit(1)\n\n\t\t\t\tmodel.lambda_a.set_value(np.array(\n\t\t\t\t\tmodel.lambda_a.get_value() * config['alignment_decay'],\n\t\t\t\t\tdtype=theano.config.floatX))\n\n\t\t\t\tbatch_nr += 1\n\n\t\t\tepochs += 1\n\t\t\tconfig['tn_epoch'] += 1\n\t\t\tconfig['ts_train'] += time() - start_time\n\n\t\t\t#Validate Model\n\t\t\tvalidate(test_pairs, start_time, optimizer, logf, sent_nr)\n\n\t\t\t#Test Translate\n\t\t\tt0 = time()\n\t\t\ttest_dec = list(model.translate(source_sample_data, encode=False))\n\t\t\tfor source, target, test in zip(\n\t\t\t\tsource_sample_data, target_sample_data, test_dec):\n\t\t\t\tlog.info('Source:' )\n\t\t\t\tlog.info('%s' % detokenize(\n\t\t\t\t\tconfig['source_encoder'].decode_sentence(source),\n\t\t\t\t\tconfig['source_tokenizer']))\n\t\t\t\tlog.info('')\n\t\t\t\tlog.info('Target:')\n\t\t\t\tlog.info('%s' % detokenize(target, 
config['target_tokenizer']))\n\t\t\t\tlog.info('')\n\t\t\t\tlog.info('Output:')\n\t\t\t\tlog.info('%s' % test)\n\t\t\t\tlog.info('-'*40)\n\t\t\tlog.info('Translation finished %.2f s' % (time() - t0))\n\n\t\t\tif config['target_tokenizer'] == 'char':\n\t\t\t\tsystem = [ detokenize(word_tokenize(s), 'space')\n\t\t\t\t\t\t\tfor s in test_dec ]\n\t\t\t\treference = [ detokenize(word_tokenize( detokenize(s, 'char')), 'space')\n\t\t\t\t\t\t\tfor s in target_sample_data]\n\t\t\telse:\n\t\t\t\tsystem = test_dec\n\t\t\t\treference = [ detokenize(s, config['target_tokenizer'])\n\t\t\t\t\t\t\t\tfor s in target_sample_data ]\n\n\t\t\tbleu_result = BLEU(system, [reference])\n\t\t\tchrf_result = chrF(reference, system)\n\t\t\tis_best = chrf_result[0] >= chrf_max\n\t\t\tchrf_max = max(chrf_result[0], chrf_max)\n\t\t\tbleu_max = max(bleu_result[0], bleu_max)\n\t\t\tlog.info('BLEU = %f (%f, %f, %f, %f, BP = %f)' % bleu_result)\n\t\t\tlog.info('chrF = %f (precision = %f, recall = %f)' % chrf_result)\n\n\t\t\tif evalf is not None:\n\t\t\t\tprint('%d\\t%.3f\\t%.3f\\t%d\\t%d' % (\n\t\t\t\t\tint(t0 - start_time),\n\t\t\t\t\tbleu_result[0],\n\t\t\t\t\tchrf_result[0],\n\t\t\t\t\toptimizer.n_updates,\n\t\t\t\t\tsent_nr\n\t\t\t\t), file=evalf, flush=True)\n\n\t\t\tif is_best:\n\t\t\t\tlog.info('Marking as best model...')\n\t\t\t\twith open(args.load_model + \".best\", 'wb') as f:\n\t\t\t\t\tpickle.dump(config, f)\n\t\t\t\t\tmodel.save(f)\n\t\t\t\t\toptimizer.save(f)\n\t\t\t\t\tf.close()\n\n\t\t\t#Save Model\n\t\t\tfilename = os.path.dirname(args.load_model) + '/%s-%d-%d.nlm' % (\n\t\t\t\tos.path.splitext(\n\t\t\t\t\tos.path.basename(args.load_model))[0],\n\t\t\t\tconfig['tn_epoch'],\n\t\t\t\toptimizer.n_updates)\n\t\t\t\n\t\t\tlog.info('Saving model at Epoch %d, Batch %d' % (config['tn_epoch'], optimizer.n_updates))\n\t\t\twith open(filename, 'wb') as f:\n\t\t\t\tpickle.dump(config, f)\n\t\t\t\tmodel.save(f)\n\t\t\t\toptimizer.save(f)\n\t\t\t\tf.close()\n\n\t\t\twith open(args.load_model, 'wb') as f:\n\t\t\t\tpickle.dump(config, f)\n\t\t\t\tmodel.save(f)\n\t\t\t\toptimizer.save(f)\n\t\t\t\tf.close()\n\n\t\tlog.info('Training Finished')\n\n\texcept KeyboardInterrupt:\n\t\tlog.info('Trainer Stopped')\n\n\texcept Exception:\n\t\tlog.warning('Exception found, see console...')\n\t\tprint(traceback.format_exc())\n\t\n\tif logf: logf.close()\n\tif evalf: evalf.close()\n\n@subcmd('translate', help='Translator tool')\ndef translator(parser, context, args):\n\n\tparser.add_argument('--load-model', type=str, metavar='FILE(s)',\n\t\thelp='Model file(s) to load from')\n\n\tparser.add_argument('--source-eval', type=str, metavar='FILE',\n\t\trequired=True,\n\t\thelp='Sentence to translate')\n\n\tparser.add_argument('--target-eval', type=str, metavar='FILE',\n\t\trequired=True,\n\t\thelp='Reference file')\n\n\tparser.add_argument('--nbest-list', type=int, metavar='N',\n\t\tdefault=0,\n\t\thelp='print n-best list in translation model')\n\n\tparser.add_argument('--random-seed', type=int, default=123, metavar='N',\n\t\thelp='Random seed for repeatable sorting of data')\n\n\tparser.add_argument('--beam-size', type=int, default=8, metavar='N',\n\t\thelp='Beam size during translation')\n\n\targs = parser.parse_args(args)\n\n\timport math\n\n\tfrom nltk import word_tokenize\n\tfrom nltk.translate.bleu_score import (modified_precision, closest_ref_length,\n\t\tbrevity_penalty, SmoothingFunction, sentence_bleu, corpus_bleu)\n\n\tfrom fractions import Fraction\n\n\tnbest = 0\n\thypotheses = []\n\n\tlog.info('Initializing Model')\n\twith 
open(args.load_model, 'rb') as f:\n\t\tlog.info('Loading %s configuration' % os.path.basename(args.load_model))\n\t\tconfig = pickle.load(f)\n\t\tmodel = NMT('nmt', config)\n\n\t\tlog.info('Loading %s weights' % os.path.basename(args.load_model))\n\t\tmodel.load(f)\n\t\tf.close()\n\n\tlog.info('Loading Source Evaluation data')\t\n\tsource_tokenizer = tokenizer(config['source_tokenizer'],\n\t\tlowercase=config['source_lowercase'])\n\tsource_eval = read(args.source_eval, source_tokenizer, config['backwards'])\n\n\tlog.info('Loading Target Evaluation data')\n\ttarget_tokenizer = tokenizer('word', \n\t\tlowercase=config['target_lowercase'])\n\treferences = read(args.target_eval, target_tokenizer, False)\n\n\tlog.info('Translating...')\n\t\n\toutput_file = open(os.path.dirname(args.source_eval) + '/result.data.eval',\n\t\t'w', encoding='utf-8')\n\n\tfor i, sent in enumerate(model.translate(source_eval, encode=True, nbest=nbest)):\n\t print(sent, file=output_file, flush=True)\n\t hypotheses.append(word_tokenize(sent))\n\n\toutput_file.close()\n\n\tlog.info('Process finished')\n\nif __name__ == '__main__':\n\twith open('logo.txt', 'r') as f:\n\t text = f.read()\n\t for line in text.split('\\n'):\n\t print(line)\n\t f.close()\n\thandler = ArgumentHandler(enable_autocompletion=True,\n\t\tdescription='FILIPINEU: Filipino - English Neural Machine Translation')\n\thandler.run()","repo_name":"redperiabras/filipineu","sub_path":"terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":22004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"23082419766","text":"\nfrom collections import Counter\n\nimport arrow\n\nfrom utils import retry_get_tree, slugify, update_data\n\n\ndef process_item_fn(row):\n try:\n desc = row.xpath(\n './/p[contains(@class, \"tweet-text\")]')[0].text_content().encode('utf8').strip()\n date = int(row.xpath('.//span/@data-time')[0])\n url = row.xpath('.//a[contains(@href, \"status/\")]/@href')\n if not url:\n return False\n url = 'https://twitter.com' + url[-1]\n handle_counter = Counter([x.text_content() for x in row.xpath(\n '//span[@class=\"username js-action-profile-name\"]//b')])\n name = handle_counter.most_common(1)[0][0] if handle_counter else ''\n likes = row.xpath('.//span[@class=\"ProfileTweet-actionCountForPresentation\"]/text()')\n twitter_item = {'_id': slugify(desc.decode()),\n 'name': name,\n 'date': str(arrow.get(date)),\n 'description': desc.decode(),\n 'url': url,\n 'likes': int(likes[-1]) if likes else 0}\n return twitter_item\n except:\n return False\n\n\ndef get_posts(conf):\n posts = []\n for handle in conf['twitter_handles']:\n print(handle)\n conf['url'] = 'https://twitter.com/{}'.format(handle)\n tree = retry_get_tree(conf['url'])\n rows = tree.xpath('//li[contains(@class, \"js-stream-item\")]')\n posts.extend([process_item_fn(row) for row in rows])\n return posts\n\n\ndef update(conf):\n conf.update({'source': 'twitter', 'doc_type': 'posts'})\n update_data(conf, get_posts(conf))\n","repo_name":"kootenpv/inthenews.io","sub_path":"scrapers/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"24"} +{"seq_id":"31503069158","text":"import sys\n\nbits = \"\"\nsteghedImage = sys.argv[1] #Image Path\nkeyWord = sys.argv[2] #Word that will be search\niteration = sys.argv[3] # Binary shifting value.\n\nprint(\"Searching:\",keyWord)\n\nfor seq in range(int(iteration)):\n 
print(seq,\"/\",iteration)\n with open(steghedImage, \"rb\") as f:\n data = f.read()\n\n data = data[seq:]\n\n for c in data:\n lsb = str(c & 0x1) # I got an LSB value.\n bits += lsb # I have collect all LSB to here\n\n bytess = [chr(int(bits[i:i+8], 2)) for i in range(0, len(bits), 8)]\n lsbstr = \"\".join(bytess)\n if keyWord in lsbstr:\n print(lsbstr)\n print(\"\\n Founded at\",seq,\". iteration\")\n break","repo_name":"bakialmaci/Python","sub_path":"StegSearcher/SteghideSearcher.py","file_name":"SteghideSearcher.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"10009060117","text":"\"\"\"Computes Morlet wavelet spectra of whitened joint angles.\n\nWhitens joint angles using global mean and variance.\nComputes parallelized Morlet wavelet transform. Save results.\n\"\"\"\nimport multiprocessing as mp\nimport numpy as np\nfrom sklearn.decomposition import IncrementalPCA\nfrom utilities import read_pickle, write_pickle\nimport config\n\n\ndef get_single_angle_wavelet_spectra(ang):\n \"\"\"\n Computes Morlet wavelet amplitudes over a single whitened angle.\n\n Parameters\n ----------\n angs : ndarray\n Whitened joint angles\n\n Returns\n -------\n wav : ndarray\n Morlet wavelet spectra of the whitened angle\n\n Notes\n -----\n Acknowledgements to Gordon Berman's Lab MotionMapper.\n Inspired by Kanishk Jain (kanishkbjain@gmail.com).\n \"\"\"\n len_ang = len(ang)\n wav = np.zeros((config.WAV_NUM_CHANNELS, len_ang))\n\n if not len_ang // 2:\n ang = np.concatenate((ang, [0]), axis=0)\n len_ang = len(ang)\n is_len_ang_odd = True\n else:\n is_len_ang_odd = False\n\n ang = np.concatenate(\n [np.zeros(int(len_ang / 2)), ang, np.zeros(int(len_ang / 2))], axis=0\n )\n modified_len_ang = len_ang\n len_ang = len(ang)\n scales = (config.WAV_OMEGA_0 + np.sqrt(2 + config.WAV_OMEGA_0 ** 2)) / (\n 4 * np.pi * config.WAV_F_CHANNELS\n )\n omega_values = (\n 2 * np.pi * np.arange(-len_ang / 2, len_ang / 2) / (len_ang * config.WAV_DT)\n )\n\n fourier_transform = np.fft.fft(ang)\n fourier_transform = np.fft.fftshift(fourier_transform)\n\n if is_len_ang_odd:\n idx = np.arange(\n (modified_len_ang / 2), (modified_len_ang / 2 + modified_len_ang - 2)\n ).astype(int)\n else:\n idx = np.arange(\n (modified_len_ang / 2), (modified_len_ang / 2 + modified_len_ang)\n ).astype(int)\n\n for i in range(config.WAV_NUM_CHANNELS):\n m_values = (np.pi ** (-0.25)) * np.exp(\n -0.5 * (-omega_values * scales[i] - config.WAV_OMEGA_0) ** 2\n )\n q_values = np.fft.ifft(m_values * fourier_transform) * np.sqrt(scales[i])\n q_values = q_values[idx]\n wav[i, :] = (\n np.abs(q_values)\n * (np.pi ** -0.25)\n * np.exp(\n 0.25 * (config.WAV_OMEGA_0 - np.sqrt(config.WAV_OMEGA_0 ** 2 + 2)) ** 2\n )\n / np.sqrt(2 * scales[i])\n )\n return wav.T\n\n\ndef get_parallel_wavelet_spectra(angs):\n \"\"\"\n Computes parallelized Morlet wavelet transform.\n\n Parameters\n ----------\n angs: array_like\n Whitened joint angles\n\n Returns\n -------\n wavs: ndarray\n Morlet wavelet amplitudes\n\n \"\"\"\n with mp.Pool(mp.cpu_count()) as pool:\n wavs = pool.map(\n get_single_angle_wavelet_spectra,\n [angs[:, j] for j in range(angs.shape[1])],\n )\n pool.close()\n pool.join()\n wavs = np.column_stack(wavs)\n return wavs\n\n\ndef get_wavelet_spectra(path_scaler_ang, path_ang, target_pickle_path):\n \"\"\"\n Computes wavelet spectra of whitened joint angles.\n\n Parameters\n ----------\n dep_pickle_paths : dict of pathlib.Path\n Paths of dependency pickle 
files to read\n target_pickle_path : pathlib.Path\n Path of target pickle file to save\n \"\"\"\n scaler_angs = read_pickle(path_scaler_ang)\n angs = read_pickle(path_ang)\n print(\"Shape of read joint angles:\", angs.shape)\n angs = scaler_angs.transform(angs)\n wavs = get_parallel_wavelet_spectra(angs)\n print(\"Shape of created wavelet spectra:\", wavs.shape)\n write_pickle(wavs, target_pickle_path)\n\n\ndef get_pca_fit_wavelet_spectra(dep_pickle_paths, target_pickle_path):\n \"\"\"Fit incremental PCA to wavelet spectra features.\"\"\"\n pca_wavs = IncrementalPCA(copy=False)\n for path in dep_pickle_paths[\"wav\"]:\n pca_wavs.partial_fit(read_pickle(path))\n del dep_pickle_paths[\"wav\"]\n write_pickle(pca_wavs, target_pickle_path)\n","repo_name":"alvaro-concha/animal-behavior-preprocessing","sub_path":"animal_behavior_preprocessing/wavelet_spectra.py","file_name":"wavelet_spectra.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"24"} +{"seq_id":"43865921896","text":"def solution(n, left, right):\n array = []\n\n for i in range(left, right + 1):\n s, r = divmod(i, n)\n if s >= r:\n array.append(s + 1)\n else:\n array.append(r + 1)\n\n return array\n\n# test case\nprint(solution(3, 2, 5))\nprint(solution(4, 7, 14))","repo_name":"ymink716/PS","sub_path":"programmers/2level/n^2 배열 자르기.py","file_name":"n^2 배열 자르기.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9747454745","text":"# -*- coding: utf-8 -*-\n\n# Import python libs\nfrom __future__ import absolute_import\n\n# Import Salt Testing libs\nfrom salttesting import skipIf\nfrom salttesting.helpers import (\n ensure_in_syspath,\n requires_salt_modules,\n requires_system_grains,\n destructiveTest,\n)\nensure_in_syspath('../../')\n\n# Import salt libs\nimport integration\nimport salt.utils\n\n\ndef _find_new_locale(current_locale):\n for locale in ['en_US.UTF-8', 'de_DE.UTF-8', 'fr_FR.UTF-8']:\n if locale != current_locale:\n return locale\n\n\n@skipIf(salt.utils.is_windows(), 'minion is windows')\n@requires_salt_modules('locale')\nclass LocaleModuleTest(integration.ModuleCase):\n @requires_system_grains\n def test_get_locale(self, grains):\n locale = self.run_function('locale.get_locale')\n self.assertNotEqual(None, locale)\n\n @destructiveTest\n @requires_system_grains\n def test_gen_locale(self, grains):\n locale = self.run_function('locale.get_locale')\n new_locale = _find_new_locale(locale)\n ret = self.run_function('locale.gen_locale', [new_locale])\n self.assertEqual(True, ret)\n\n @destructiveTest\n @requires_system_grains\n def test_set_locale(self, grains):\n original_locale = self.run_function('locale.get_locale')\n locale_to_set = _find_new_locale(original_locale)\n self.run_function('locale.gen_locale', [locale_to_set])\n ret = self.run_function('locale.set_locale', [locale_to_set])\n new_locale = self.run_function('locale.get_locale')\n self.assertEqual(True, ret)\n self.assertEqual(locale_to_set, new_locale)\n self.run_function('locale.set_locale', [original_locale])\n","repo_name":"makinacorpus/salt","sub_path":"tests/integration/modules/locale.py","file_name":"locale.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"19059866352","text":"# -*- coding: utf-8 -*-\n\nimport uuid\nimport time\nimport datetime\nfrom warnings import 
warn\nfrom functools import wraps\n\nfrom requests import Session\n\ntry:\n from urllib import quote\nexcept ImportError:\n from urllib.parse import quote\n\nDIALECT = 'python'\n\nDEFAULT_API_VERSION = 1\n\nDEFAULT_BUILD_TITLE = 'Launch at {}'.format(\n datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S'),\n)\n\nDEFAULT_BUILD_NAME = str(hash(str(uuid.uuid4()) + str(time.time())))\n\nDEFAULT_CONNECTION_TIMEOUT = 3\n\n\nclass RemoteApiError(UserWarning):\n pass\n\n\ndef will_expected(status_code):\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n try:\n resp = f(*args, **kwargs)\n except BaseException as error:\n warn(\n '{}'.format(\n error.__class__.__name__,\n getattr(\n error, 'message',\n u' '.join(\n str(i) for i in getattr(error, 'args', []) if not isinstance(i, int)\n )\n ),\n ),\n RemoteApiError,\n )\n return None\n\n if resp is not None and resp.status_code != status_code:\n message = u'Aggregate analytic error. From URL {} got response {}'.format(\n resp.url, resp.content,\n )\n warn(message, RemoteApiError)\n return resp\n\n return wrapped\n\n return wrapper\n\n\ndef true_by_status(success_status_code):\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n try:\n resp = f(*args, **kwargs)\n except BaseException as error:\n warn(\n '{}'.format(\n error.__class__.__name__,\n getattr(\n error, 'message',\n u' '.join(\n str(i) for i in getattr(error, 'args', []) if not isinstance(i, int)\n )\n ),\n ),\n RemoteApiError,\n )\n return False\n\n if resp.status_code == success_status_code:\n return True\n return False\n\n return wrapped\n\n return wrapper\n\n\nclass Seisma(Session):\n\n def __init__(self,\n base_url,\n job_name,\n job_title,\n build_title,\n build_name=None,\n api_version=None,\n build_metadata=None,\n build_name_preffix=None,\n connection_timeout=None):\n self.base_url = base_url\n self.api_version = api_version or DEFAULT_API_VERSION\n\n self.api_url = '{}/api/v{}'.format(self.base_url, self.api_version)\n\n self.build_title = build_title or DEFAULT_BUILD_TITLE\n self.build_name = quote(build_name or DEFAULT_BUILD_NAME)\n\n if build_name_preffix:\n self.build_name = build_name_preffix + self.build_name\n\n self.job_name = quote(job_name)\n self.job_title = job_title\n\n self.build_metadata = build_metadata\n\n super(Seisma, self).__init__()\n\n self.headers['Content-Type'] = 'application/json'\n\n self.connection_timeout = connection_timeout\n\n def request(self, *args, **kwargs):\n kwargs.setdefault('timeout', self.connection_timeout or DEFAULT_CONNECTION_TIMEOUT)\n\n return super(Seisma, self).request(*args, **kwargs)\n\n @will_expected(201)\n def create_job(self, description=None):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}'.format(self.job_name),\n )\n json = {\n 'title': self.job_title,\n }\n if description:\n json['description'] = description\n\n return self.post(url, json=json)\n\n @true_by_status(200)\n def job_exists(self):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}'.format(self.job_name),\n )\n\n return self.get(url)\n\n @will_expected(201)\n def start_build(self):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}/builds/{}/start'.format(\n self.job_name, self.build_name,\n ),\n )\n\n json = {\n 'title': self.build_title,\n }\n\n if isinstance(self.build_metadata, dict):\n json['metadata'] = self.build_metadata\n\n return self.post(url, json=json)\n\n @will_expected(200)\n def stop_build(self,\n runtime,\n was_success,\n tests_count,\n success_count,\n fail_count,\n error_count):\n url = '{}{}'.format(\n 
self.api_url,\n '/jobs/{}/builds/{}/stop'.format(\n self.job_name, self.build_name,\n ),\n )\n json = {\n 'runtime': runtime,\n 'was_success': was_success,\n 'tests_count': tests_count,\n 'success_count': success_count,\n 'fail_count': fail_count,\n 'error_count': error_count,\n }\n\n return self.put(url, json=json)\n\n @true_by_status(200)\n def case_exists_on_job(self, name):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}/cases/{}'.format(\n self.job_name, quote(name),\n ),\n )\n\n return self.get(url)\n\n @will_expected(201)\n def add_case_to_job(self, name, description=None):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}/cases/{}'.format(\n self.job_name, quote(name),\n ),\n )\n json = {}\n\n if description:\n json['description'] = description\n\n return self.post(url, json=json)\n\n @will_expected(200)\n def update_case_in_job(self, name, description):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}/cases/{}'.format(\n self.job_name, quote(name),\n ),\n )\n json = {\n 'description': description,\n }\n\n return self.put(url, json=json)\n\n @will_expected(201)\n def add_case_result(self, name, status, runtime, reason=None, metadata=None):\n url = '{}{}'.format(\n self.api_url,\n '/jobs/{}/builds/{}/cases/{}'.format(\n self.job_name, self.build_name, quote(name),\n ),\n )\n\n json = {\n 'status': status,\n 'runtime': runtime,\n 'dialect': DIALECT,\n }\n\n if reason:\n json['reason'] = reason\n\n if metadata:\n json['metadata'] = metadata\n\n return self.post(url, json=json)\n","repo_name":"seisma-test-analytics/seisma-client","sub_path":"python/seisma/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"33604820006","text":"#!/usr/bin/env python3\r\n\r\nimport json\r\nimport logging\r\nimport os\r\nimport uuid\r\n\r\nfrom cassandra.cluster import Cluster\r\nfrom cassandra.policies import RoundRobinPolicy\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n\r\nclass database:\r\n def __init__(self, database):\r\n try:\r\n self.dbHost = os.environ['DATABASE_HOST'].split(',')\r\n logger.info(\"Database Host \" + str(self.dbHost))\r\n self.database = database\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n pass\r\n\r\n def __del__(self):\r\n logger.error('Removing database object')\r\n\r\n def connect(self):\r\n ret = False\r\n try:\r\n logger.info(\"Database Host connecting\" + str(self.dbHost))\r\n\r\n db_cluster = Cluster(self.dbHost, load_balancing_policy=RoundRobinPolicy())\r\n self.db_session = db_cluster.connect(keyspace=self.database)\r\n self.db_session.default_timeout = 5.0\r\n ret = True\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n return ret\r\n\r\n def create(self, table):\r\n try:\r\n rows = self.queryTableExist(table)\r\n if not rows:\r\n logger.info('create table {0}'.format(table))\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n def insert(self, table, data):\r\n try:\r\n logger.info('Insert Records into {0}'.format(table))\r\n stmt = self.db_session.prepare('INSERT INTO {0} JSON ?;'.format(table))\r\n data = json.dumps(data)\r\n self.db_session.execute(stmt, [data])\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n def update(self, table, data):\r\n try:\r\n logger.info('Update Table {0}'.format(table))\r\n stmt = self.db_session.prepare('INSERT INTO {0} JSON ? 
DEFAULT UNSET;'.format(table))\r\n data = json.dumps(data)\r\n self.db_session.execute(stmt, [data])\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n def query(self, table, test_id):\r\n rows = None\r\n\r\n try:\r\n logger.info(\"query {0}\".format(test_id))\r\n if not isinstance(test_id, uuid.UUID):\r\n id = uuid.UUID(test_id)\r\n else:\r\n id = test_id\r\n\r\n query = \"SELECT * FROM {0} WHERE test_id=%s allow filtering\".format(table)\r\n future = self.db_session.execute_async(query, [id])\r\n rows = future.result()\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n return rows\r\n\r\n def queryKey(self, table, key, value):\r\n rows = None\r\n try:\r\n query = \"SELECT * FROM {0} WHERE {1}=%s allow filtering\".format(table, key)\r\n future = self.db_session.execute_async(query, [value])\r\n rows = future.result()\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n return rows\r\n\r\n def queryTableExist(self, table):\r\n try:\r\n query = \"SELECT table_name FROM system_schema.tables WHERE keyspace_name=%s and table_name=%s allow filtering\"\r\n future = self.db_session.execute_async(query, [self.database, table])\r\n rows = future.result()\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n return rows\r\n\r\n def AddResults(self, test_id, test_name, num_clients, tier, ip_gateway, gateway_version, client_version, protocol, test_mode, encryption, test_key):\r\n try:\r\n result = dict()\r\n result['test_id'] = str(test_id)\r\n result['gateway_version'] = gateway_version\r\n result['client_version'] = client_version\r\n result['ip_gateway'] = ip_gateway\r\n result['tier'] = tier\r\n result['protocol'] = protocol\r\n result['num_clients'] = num_clients\r\n result['test_name'] = test_name\r\n result['test_key'] = test_key\r\n # result['test_mode']=test_mode\r\n # result['encryption']=encryption\r\n self.insert('perf_results', result)\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n def UpdateResults(self, test_id, test_mode, reason):\r\n try:\r\n clientSuccess = 0\r\n duration = 60\r\n result = dict()\r\n mbpsStats = []\r\n error_text = reason\r\n numClients = 0\r\n\r\n rows = self.query('perf2', test_id)\r\n logger.info(\"update \")\r\n for row in rows:\r\n clientSuccess += 1\r\n duration = row.duration\r\n numClients = row.num_clients\r\n error_text += ' Err:{0} {1}'.format(row.error_no, row.error_text)\r\n if test_mode == 'download':\r\n mbpsStats.append(float(row.sent_mbps))\r\n else:\r\n mbpsStats.append(float(row.rcvd_mbps))\r\n\r\n # add perf summary in summary table\r\n if mbpsStats:\r\n result['result'] = 'Success'\r\n result['test_id'] = str(test_id)\r\n result['failed_rate'] = int((len(mbpsStats)/2)/numClients * 100)\r\n result['avg_mbps'] = sum(mbpsStats)/len(mbpsStats)\r\n result['max_mbps'] = max(mbpsStats)\r\n result['min_mbps'] = min(mbpsStats)\r\n result['error_text'] = 'Test Completed'\r\n result['duration'] = duration\r\n else:\r\n result['test_id'] = str(test_id)\r\n result['result'] = 'Failed'\r\n result['error_text'] = error_text\r\n\r\n logger.info(\"update {0}\".format(result))\r\n self.update('perf_results', result)\r\n\r\n return result\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n\r\n def UpdateGatewayStats(self, test_id, cpu, cpu_free, mem, mem_free, packetsPerSec):\r\n result = dict()\r\n\r\n result['test_id'] = str(test_id)\r\n result['gateway_cpu_utilization'] = cpu\r\n result['gateway_cpu_free'] = 
cpu_free\r\n result['gateway_memory'] = mem\r\n result['gateway_memory_free'] = mem_free\r\n result['packetsPerSec'] = packetsPerSec\r\n self.update('perf_results', result)\r\n\r\n def QueryToJson(self, table, test_id):\r\n try:\r\n endpoints = dict()\r\n outDict = dict()\r\n data = dict()\r\n events = []\r\n hostinfo = dict()\r\n testName = \"test-cell\"\r\n duration = \"60\"\r\n\r\n outDict[\"test-id\"] = test_id\r\n\r\n rows = self.query(table, test_id)\r\n for row in rows:\r\n data = dict()\r\n\r\n if row.session_id:\r\n data[\"session\"] = row.session_id\r\n\r\n perfRows = self.queryKey('perf2', 'session_key', row.session_id)\r\n for prow in perfRows:\r\n # set topology\r\n hostinfo = dict()\r\n hostinfo[\"name\"] = row.name\r\n hostinfo[\"instance-type\"] = \"t2.micro\"\r\n endpoints[row.source] = hostinfo\r\n testName = prow.test_name\r\n duration = prow.duration\r\n\r\n data[\"ts\"] = row.timestamp_value\r\n data[\"op\"] = row.op\r\n data[\"payload\"] = json.loads(row.raw_json)\r\n data[\"source\"] = row.source\r\n events.append(data)\r\n\r\n # sort events for now\r\n events = sorted(events, key=lambda p: p[\"ts\"], reverse=False)\r\n\r\n outDict[\"events\"] = events\r\n # get first event and set start time\r\n outDict[\"start\"] = events[0][\"ts\"]\r\n outDict[\"topology\"] = endpoints\r\n outDict[\"test-name\"] = testName\r\n outDict[\"duration\"] = duration\r\n except Exception as e:\r\n logger.error(\"Error: {0}\".format(e))\r\n else:\r\n return json.dumps(outDict)\r\n","repo_name":"tjohnson-github/Test-Orchestartor","sub_path":"test-common/test_common/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"2419873366","text":"import numpy as np \nimport pandas as pd\nfrom numba import jit\nimport backtest\n\nfrom copy import deepcopy\nfrom database import general_util\nfrom database.general_util import timeme\n\nclass Alpha():\n\n def __init__(self, instruments, dfs, configs):\n self.instruments = instruments\n self.dfs = deepcopy(dfs)\n self.configs = configs\n self.portfolio_df = None\n\n def get_trade_datetime_range(self):\n return (self.configs['start'], self.configs['end'])\n\n\n def compute_metas(self, index):\n\n\n @jit(nopython=True)\n def numba_any(x):\n return int(np.any(x))\n \n vols , rets , actives , closes , fxconvs = [] , [] , [] , [] , []\n\n\n for inst in self.instruments:\n df = pd.DataFrame(index=index)\n self.dfs[inst]['vol'] = (-1 + self.dfs[inst]['close']/self.dfs[inst].shift(1)['close']).rolling(30).std()\n #print(self.dfs[inst])\n self.dfs[inst] = df.join(self.dfs[inst])\n #print(self.dfs[inst])\n self.dfs[inst] = self.dfs[inst].fillna(method='ffill').fillna(method='bfill')\n self.dfs[inst]['ret']= -1 + self.dfs[inst]['close']/self.dfs[inst].shift(1)['close']\n self.dfs[inst]['sampled'] = self.dfs[inst]['close']!=self.dfs[inst].shift(1)['close']\n self.dfs[inst]['active'] = self.dfs[inst]['sampled'].rolling(5).apply(numba_any ,engine =\"numba\", raw = True).fillna(0)\n \n \n \n vols.append(self.dfs[inst]['vol'])\n rets.append(self.dfs[inst]['ret'])\n actives.append(self.dfs[inst]['active'])\n closes.append(self.dfs[inst]['close'])\n \n for inst in self.instruments:\n if inst[-3:] == 'USD':\n fxconvs.append(pd.Series(index=index,data=np.ones(len(index))))\n elif inst[-3:] + 'USD%USD' in self.dfs:\n fxconvs.append(self.dfs [inst[-3:] + \"USD%USD\"][\"close\"])\n elif 'USD' + inst[-3:] + '%' + inst[-3:] in self.dfs:\n 
fxconvs.append(1/ self.dfs['USD' + inst[-3:] + '%' + inst[-3:]]['close'])\n else:\n print('NO SOlution', inst,inst[-3:])\n exit()\n self.voldf = pd.concat(vols,axis=1)\n self.voldf.columns = self.instruments\n self.retdf = pd.concat(rets , axis =1)\n self.retdf.columns = self.instruments\n self.activedf = pd.concat(actives , axis =1)\n self.activedf.columns = self.instruments\n closedf = pd.concat(closes , axis =1)\n closedf.columns = self.instruments\n fxconvsdf = pd.concat(fxconvs , axis =1)\n fxconvsdf.columns = self.instruments\n self.baseclosedf = fxconvsdf * closedf\n pass\n \n\n\n def compute_forecasts(self,index,date):\n pass\n\n def post_risk_management(self, index , date , eligibles , nominal_tot , positions , weights):\n return nominal_tot, positions,weights\n\n def init_portfolio_settings(self, trade_range):\n self.portfolio_df = pd.DataFrame(index=trade_range).reset_index().rename(columns={'index':'datetime'})\n \n return 10000,0.001,1,self.portfolio_df\n \n def compute_eligibles(self, date):\n eligibles = [inst for inst in self.instruments if self.dfs[inst].at[date,'eligible']]\n non_eligibles = [inst for inst in self.instruments if not self.dfs[inst].at[date,'eligible']]\n return eligibles, non_eligibles\n \n def get_strat_scalar(self, lookback, portfolio_vol, idx, default,ewmas,ewstrats):\n #print(idx)\n ann_realized_vol = np.sqrt(ewmas[-1]*252)\n return portfolio_vol/ann_realized_vol*ewstrats[-1]\n\n def set_positions(self, capital, portfolio_i, forecasts,\n eligibles, num_trading, portfolio_vol, strat_scalar,\n invrisk_row, baseclose_row):\n vol_target = 1.0/num_trading*capital*portfolio_vol/np.sqrt(253)\n positions = eligibles*strat_scalar*vol_target*forecasts*invrisk_row/baseclose_row\n positions = np.nan_to_num(positions,nan=0,posinf=0,neginf=0)\n nominal_tot = np.linalg.norm(positions*baseclose_row,ord=1)\n return positions, nominal_tot\n\n def set_weights(self, nominal_tot, positions, baseclose_row):\n nominals = positions * baseclose_row\n weights = np.nan_to_num(nominals/nominal_tot,nan=0,posinf=0,neginf=0)\n return weights\n\n @timeme\n def run_simulation(self, verbose=False):\n\n portfolio_vol = 0.4\n trade_datetime_range = pd.date_range(start=self.get_trade_datetime_range()[0],\n end=self.get_trade_datetime_range()[1],freq='D')\n\n self.compute_metas(index=trade_datetime_range)\n\n capital,ewma, ewstrat,self.portfolio_df = self.init_portfolio_settings(trade_range=trade_datetime_range)\n\n date_prev = None\n baseclose_prev = None\n capitals = [capital]\n ewmas = [ewma]\n ewstrats = [ewstrat]\n nominalss = []\n leverages = []\n strat_scalars = []\n units_held = []\n weights_held = []\n\n for (portfolio_i,portfolio_row),(ret_i,ret_row),(baseclose_i,baseclose_row),\\\n (eligibles_i,eligibles_row),(invrisk_i,invrisk_row) in \\\n zip(self.portfolio_df.iterrows(),self.retdf.iterrows(),self.baseclosedf.iterrows(),\\\n self.eligiblesdf.iterrows(),self.invriskdf.iterrows()):\n portfolio_row = portfolio_row.values\n ret_row = ret_row.values\n baseclose_row = baseclose_row.values\n eligibles_row = eligibles_row.values\n invrisk_row = invrisk_row.values\n\n strat_scalar = 2\n #print(portfolio_i)\n if portfolio_i != 0:\n strat_scalar = self.get_strat_scalar(lookback=30, portfolio_vol=portfolio_vol,\n idx=portfolio_i, default=strat_scalars[-1],\n ewmas=ewmas,ewstrats=ewstrats)\n capitals, nominal_ret,ewmas = backtest.get_pnl_stats(\n portfolio_df=self.portfolio_df,\n last_weights= weights_held[-1],\n last_units=units_held[-1],\n idx=portfolio_i,\n 
baseclose_row=baseclose_prev,\n ret_row=ret_row,\n capitals=capitals,\n leverages=leverages,\n ewmas=ewmas\n )\n \n ewstrats.append(0.06*strat_scalar + .94*ewstrats[-1] if nominal_ret!=0 else ewstrats[-1])\n\n strat_scalars.append(strat_scalar)\n\n forecasts , num_trading = self.compute_forecasts(\n portfolio_i=portfolio_i,\n date=ret_i,\n eligibles_row=eligibles_row\n )\n positions , nominal_tot = self.set_positions(\n capital=capitals[-1],\n portfolio_i= portfolio_i,\n forecasts=forecasts,\n eligibles=eligibles_row,\n num_trading=num_trading,\n portfolio_vol=portfolio_vol,\n strat_scalar=strat_scalars[-1],\n invrisk_row=invrisk_row,\n baseclose_row=baseclose_row\n )\n \n weights = self.set_weights(nominal_tot, positions, baseclose_row)\n \n nominal_tot, positions, weights = self.post_risk_management(\n index=portfolio_i,\n date=ret_i,\n eligibles=eligibles_row,\n nominal_tot=nominal_tot,\n positions=positions,\n weights=weights\n )\n \n date_prev = portfolio_i\n baseclose_prev = baseclose_row\n nominalss.append(nominal_tot)\n leverages.append(nominal_tot/capitals[-1])\n units_held.append(positions)\n weights_held.append(weights)\n\n if verbose:\n capital_ser = pd.Series(data=capitals,index=trade_datetime_range,name='capital')\n stratscal_ser = pd.Series(data=strat_scalars,index=trade_datetime_range,name='strat_scalar')\n nominals_ser = pd.Series(data=nominalss,index=trade_datetime_range,name='nominal_tot')\n leverages_ser = pd.Series(data=leverages,index=trade_datetime_range,name='leverage')\n units = pd.DataFrame(data=units_held, index=trade_datetime_range, \n columns=[inst+' units' for inst in self.instruments])\n weights = pd.DataFrame(data=weights_held, index=trade_datetime_range, \n columns=[inst+' w' for inst in self.instruments])\n \n self.portfolio_df = pd.concat([\n units,\n weights,\n stratscal_ser,\n nominals_ser,\n leverages_ser,\n capital_ser\n ], axis=1)\n print(self.portfolio_df)\n else:\n print(capitals)\n return self.portfolio_df\n \n\n","repo_name":"Yonasbo/Alpha-Database","sub_path":"Alpha/alpha.py","file_name":"alpha.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27895079723","text":"def qnsgo(n):\n temp = []\n for i in range(len(n)):\n temp.append(int(n[i]))\n return temp\n\nn = input()\nif int(n)<1 or int(n)>1000000:\n print(\"잘못된 입력!\")\nelse:\n a = len(n)\n todtjdwk = []\n\n for i in range(a*10+a, int(n)):\n mylist = qnsgo(str(i))\n sum = 0\n for j in mylist:\n sum+=j\n if i+sum == int(n):\n todtjdwk.append(i)\n\n if len(todtjdwk) == 0:\n print(0)\n else:\n print(min(todtjdwk))\n","repo_name":"CHmon-Park/piro12","sub_path":"20200110 금요일 과제/20200109 파이썬 과제.py","file_name":"20200109 파이썬 과제.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14972131421","text":"from credit_card_defaulters.logger import logging\nfrom credit_card_defaulters.exception import CreditException\nfrom credit_card_defaulters.entity.artifact_entity import ModelPusherArtifact, ModelEvaluationArtifact \nfrom credit_card_defaulters.entity.config_entity import ModelPusherConfig\nimport os, sys\nimport shutil\n\n\nclass ModelPusher:\n\n def __init__(self, model_pusher_config: ModelPusherConfig,\n model_evaluation_artifact: ModelEvaluationArtifact\n ):\n try:\n logging.info(f\"{'>>' * 30}Model Pusher log started.{'<<' * 30} \")\n self.model_pusher_config = model_pusher_config\n 
self.model_evaluation_artifact = model_evaluation_artifact\n\n except Exception as e:\n raise CreditException(e, sys) from e\n\n def export_model(self) -> ModelPusherArtifact:\n try:\n ## model trained will be here in this path and if and only if model evaltuon status if true, \n ## this pusher will execute\n evaluated_model_file_path = self.model_evaluation_artifact.evaluated_model_path ## trained model path\n export_dir = self.model_pusher_config.export_dir_path ## saved models folder paath\n model_file_name = os.path.basename(evaluated_model_file_path)## fetching model name to save it into saved models folder\n export_model_file_path = os.path.join(export_dir, model_file_name) ## final path to push model into saved models filder\n logging.info(f\"Exporting model file: [{export_model_file_path}]\")\n os.makedirs(export_dir, exist_ok=True)\n \n ## now copying the file from source to destination path\n shutil.copy(src=evaluated_model_file_path, dst=export_model_file_path)\n #we can call a function to save model to push to Azure blob storage/ google cloud strorage / s3 bucket\n logging.info(\n f\"Trained model: {evaluated_model_file_path} is copied in export dir:[{export_model_file_path}]\")\n\n model_pusher_artifact = ModelPusherArtifact(is_model_pusher=True,\n export_model_file_path=export_model_file_path\n )\n logging.info(f\"Model pusher artifact: [{model_pusher_artifact}]\")\n return model_pusher_artifact\n except Exception as e:\n raise CreditException(e, sys) from e\n\n def initiate_model_pusher(self) -> ModelPusherArtifact:\n try:\n return self.export_model()\n except Exception as e:\n raise CreditException(e, sys) from e\n\n def __del__(self):\n logging.info(f\"{'>>' * 20}Model Pusher log completed.{'<<' * 20} \")","repo_name":"krishnan5307/Credit_card_default_prediction_with_mlflow","sub_path":"credit_card_defaulters/component/model_pusher.py","file_name":"model_pusher.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26328421115","text":"from django.core.exceptions import ValidationError\n\n\ndef validate_views(cleaned_data):\n for view in cleaned_data.get('views'):\n if cleaned_data.get('domain'):\n check_no_ns_soa_condition(cleaned_data['domain'], view=view)\n if cleaned_data.get('reverse_domain'):\n check_no_ns_soa_condition(cleaned_data['reverse_domain'],\n view=view)\n\n\ndef check_no_ns_soa_condition(domain, view=None):\n if domain.soa:\n fail = False\n root_domain = domain.soa.root_domain\n if root_domain and not root_domain.nameserver_set.exists():\n fail = True\n elif (view and\n not root_domain.nameserver_set.filter(views=view).exists()):\n fail = True\n if fail:\n if view:\n error = '{0} view in {1}'.format(view, domain.name)\n else:\n error = 'domain: {0}'.format(domain.name)\n raise ValidationError(\n \"The {0} you are trying to assign this record into does \"\n \"not have an NS record, thus cannot support other \"\n \"records.\".format(error))\n","repo_name":"OSU-Net/cyder","sub_path":"cyder/cydns/view/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"20"} +{"seq_id":"7339730033","text":"#!/usr/bin/env python3\n\n\"\"\"\n------------------------------------------------------------------------------\nPackageDNA - diciembre 2019 - Carlos Avila @badboy_nt\n - Diego Espitia @dsespitia\n - Franco Piergallini 
@francoguida\n------------------------------------------------------------------------------\n\"\"\"\n\n# %%%%%%%%%%% Libraries %%%%%%%%%%%#\n\n\nimport os\nimport sys\nimport click\nimport logging\nimport colorama\nimport subprocess\nfrom flask_server.run_server import run_server\nfrom menu_classes.main_menu import MainMenu\nfrom auxiliar_functions.banner import banner\nfrom auxiliar_functions.auxiliar_functions import create_dir\nfrom auxiliar_functions.auxiliar_functions import end_execution\n\n# %%%%%%% Context Variables %%%%%%%#\nfrom menu_classes.single_library_menu import SingleLibraryMenu\nfrom menu_classes.user_menu import UserMenu\n\nVERSION = 0.1\nLOG_ERRORS_FILENAME = 'log_errors.txt'\nlogging.basicConfig(filename=LOG_ERRORS_FILENAME, filemode='a+')\n\n# %%%%%%%%%% Main %%%%%%%%%#\n\n\n@click.group()\ndef cli():\n pass\n\n\n@click.group(help='Packages related commands')\ndef packages():\n \"\"\"Function generated for use the command line\"\"\"\n pass\n\n\n@click.group(help='Users related commands')\ndef users():\n \"\"\"Function generated for use the command line\"\"\"\n pass\n\n\ncli.add_command(packages)\ncli.add_command(users)\n\n\n@users.command()\n@click.option('--user_name', help='Analyze a username over platforms')\ndef analyze_user(user_name):\n UserMenu(None).start_analysis(user_name)\n\n\n@packages.command()\n@click.option('--package_name', help='Analyze a python package all versions')\ndef analyze_python_package(package_name):\n SingleLibraryMenu(None).get_python_package(package_name=package_name)\n run_server(package_name)\n\n\n@packages.command()\n@click.option('--package_name', help='Analyze a ruby package all versions')\ndef analyze_ruby_package(package_name):\n SingleLibraryMenu(None).get_ruby_gem(package_name=package_name)\n run_server(package_name)\n\n\n@packages.command()\n@click.option('--package_name', help='Analyze a npm package all versions')\ndef analyze_npm_package(package_name):\n SingleLibraryMenu(None).get_npm_package(package_name=package_name)\n run_server(package_name)\n\n\n@packages.command(help='List analyzed packages')\ndef list_packages():\n packages = os.listdir('flask_server/static/data')\n click.secho('Analyzed packages:\\n\\t' + \"\\n\\t\".join(packages), fg='green')\n\n\n@packages.command(help='Run analyzed package')\n@click.option('--package_name', help='Name of an analized package')\ndef run_package(package_name):\n if os.path.isfile(f'flask_server/static/data/{package_name}'):\n run_server(package_name)\n else:\n click.secho('Package not found', fg='red')\n\n\npackages.add_command(analyze_python_package)\npackages.add_command(analyze_ruby_package)\npackages.add_command(analyze_npm_package)\npackages.add_command(list_packages)\npackages.add_command(run_package)\nusers.add_command(analyze_user)\n\n\ndef main():\n\n if len(sys.argv) > 1:\n cli()\n else:\n try:\n os.system('cls' if os.name == 'nt' else 'clear')\n colorama.init(autoreset=\"True\")\n create_dir()\n main_menu = MainMenu()\n banner()\n main_menu.generate_menu()\n except KeyboardInterrupt:\n end_execution()\n except Exception as e:\n print(f'Error unexpected: {e}')\n print('Something went very wrong ...' 
+ u\"\\U0001F62D\"*3)\n logging.error(e, exc_info=True)\n print(f'Please send the file {LOG_ERRORS_FILENAME}'\n f' to innovationlab@11paths.com')\n\n\nif __name__ == '__main__':\n main()\n\n# %%%%%%%%%% The End %%%%%%%%%%\n","repo_name":"TelefonicaTC2Tech/packagedna","sub_path":"packagedna.py","file_name":"packagedna.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"20"} +{"seq_id":"27771127384","text":"__doc__\n\n\ndef are_nodes_adjacent(node1, node2, matrix):\n \"\"\"\n The function takes 3 arguments:\n\n - index of the first graph node (int)\n\n - index of the second graph node (int)\n\n - a node adjacency matrix (nested list).\n\n In the matrix, 1 indicates that a connection is true, and 0 indicates a connection is false.\n\n The function determines whether the given nodes are connected or not and returns a boolean.\n \"\"\"\n # first node's connections with other nodes: matrix[node1]\n # first node's connection with the second node: matrix[node1][node2]\n # 0 and 1 are easily converted into boolean values:\n return bool(matrix[node1][node2])\n\n\ndef main():\n matrix1 = [[0, 1, 0, 0], [1, 0, 1, 1], [0, 1, 0, 1], [0, 1, 1, 0]]\n answer1 = are_nodes_adjacent(0, 1, matrix1)\n answer2 = are_nodes_adjacent(0, 2, matrix1)\n print(answer1)\n print(answer2)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"olhanotolga/python-challenges","sub_path":"finding_adjacent_nodes/adjacent_nodes.py","file_name":"adjacent_nodes.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25565652787","text":"# coding=utf-8\n\"\"\"\n 本程序使用requests库和re库实现爬取豆瓣图书top250.\n\"\"\"\nimport requests\nimport re\nimport multiprocessing\n\n\nclass Spider(object):\n def __init__(self, headers=None, cookie=None):\n self.headers = headers\n self.cookie = cookie\n self.result = []\n self.fetch_list = []\n\n def get_content(self, url):\n content = requests.get(url)\n content = content.text.encode('utf-8')\n\n return content\n\n def get_content_by_regex(self, start):\n url = \"https://book.douban.com/top250?start=\" + str(start)\n content = self.get_content(url)\n\n regex = '.*?(.*?).*?'\\\n '.*?(.*?)

.*?'\\\n '(.*?).*?\\((.*?)\\).*?'\\\n 'class=\\\"inq\\\">(.*?).*?
'\n\n pattern = re.compile(regex, re.S)\n _ = re.findall(pattern, content)\n\n for item in _:\n # 这里可以把数据存放到mongodb中\n print(item[0].strip())\n self.result.append(item)\n\n def go(self):\n \"\"\"\n 这里有个问题就是每次都是不同的实例,不能把数据保存到result中,除非设置全局变量。\n \"\"\"\n pool = multiprocessing.Pool(multiprocessing.cpu_count()*10)\n pool.map(self, [i for i in range(0, 250, 25)])\n\n def __call__(self, start):\n return self.get_content_by_regex(start)\n\nif __name__ == '__main__':\n spider = Spider()\n spider.go()\n print(spider.result)\n","repo_name":"xiaoqibao-growing/Python","sub_path":"spider/douban/book_douban_top250.py","file_name":"book_douban_top250.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"72985099570","text":"import json \nimport tweepy\nimport socket\nimport re\nimport requests\n\n#Removing Links from the data\ndef removingLinks(t):\n t = re.sub(r\"http\\S+\", \"\", t)\n return t\n\n#Removing Emojis from the data\ndef removingEmoji(t):\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\"\n u\"\\U0001F300-\\U0001F5FF\"\n u\"\\U0001F680-\\U0001F6FF\"\n u\"\\U0001F1E0-\\U0001F1FF\"\n \"]+\", flags=re.UNICODE)\n t = emoji_pattern.sub(r'', t)\n return t\n\n#Removing white space characters from the data \ndef removingSpaces(t):\n t = re.sub(r'@\\S*[\\s, .]',r'',t)\n return t\n\n#Removing Special Characters\ndef removingSpecialCharacters(tweet): \n tweet = tweet.replace(\"RT\", \"\")\n tweet = re.sub(r'[^a-zA-Z0-9 .#]',r'',tweet)\n return tweet\n\n#Access Tokens and Keys\nACCESS_TOKEN = '984486545041313793-12d5AlbRPPaZBlkzlLSl0uBfzD13OnI'\nACCESS_SECRET = 'xuLWnsVKxgsinTPGf7ARO4kPu4IEwKz6j9w0S1nQrHlP3'\nCONSUMER_KEY = 'Dfeyuzv6U4inGQrKLbVxqbWmV'\nCONSUMER_SECRET = '\tsMJqc7ZVlVb6RfhwrM8Cfhdnmsk06A3wNh8Kjiq8dCQhN2qTT5'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\nhashtag = '#guncontrolnow'\nTCP_IP = 'localhost'\nTCP_PORT = 9002\n\n#Creating sockets\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((TCP_IP, TCP_PORT))\ns.listen(1)\nconn, addr = s.accept()\n\nclass MyStreamListener(tweepy.StreamListener):\n \n def on_status(self, status): \n\n tweet = status.text\n tweet = removingLinks(tweet)\n tweet = removingSpaces(tweet)\n tweet = removingEmoji(tweet)\n tweet = removingSpecialCharacters(tweet)\n\n loc = status.user.location\n if loc is None:\n return { 'lat' : 0.0, 'lon' : 0.0}\n else:\n params = {'sensor': 'false', 'address': loc}\n r = requests.get('https://maps.googleapis.com/maps/api/geocode/json', params=params)\n results = r.json()['results']\n for res in results:\n print(res['geometry']['location'])\n location = { 'lat' :res['geometry']['location']['lat'],\n 'lon' : res['geometry']['location']['lng']}\n\n dict = {'text': tweet, \n 'location': loc, \n 'timestamp_ms' : status.timestamp_ms }\n print(dict)\n conn.send((json.dumps(dict) + \"\\n\").encode('utf-8'))\n \n def on_error(self, status_code):\n if status_code == 420:\n return False\n else:\n print(status_code)\n\nmyStream = tweepy.Stream(auth=auth, listener=MyStreamListener())\nmyStream.filter(track=[hashtag])\n","repo_name":"guptakavya/BigData","sub_path":"Assignment3/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14623227743","text":"from googleapiclient.discovery import build\nfrom 
google_auth_oauthlib.flow import InstalledAppFlow\nimport pickle\nfrom datetime import datetime, timedelta\nimport datefinder\nimport sys\n\n\ndef get_credentials():\n scopes = ['https://www.googleapis.com/auth/calendar']\n flow = InstalledAppFlow.from_client_secrets_file(\"client_secret.json\", scopes=scopes)\n credentials = flow.run_console()\n pickle.dump(credentials, open(\"token.pk1\", \"wb\"))\n credentials = pickle.load(open(\"token.pk1\", \"rb\"))\n return credentials\n\n\ndef get_calendar_service():\n\n try:\n credentials = pickle.load(open(\"token.pk1\", \"rb\"))\n except FileNotFoundError:\n credentials = get_credentials()\n service = build(\"calendar\", \"v3\", credentials=credentials)\n return service\n\n\ndef create_event(service, start_time_str, summary, duration=1, description=None, location=None):\n\n matches = list(datefinder.find_dates(start_time_str))\n if len(matches):\n start_time = matches[0]\n end_time = start_time + timedelta(hours=duration)\n\n event = {\n 'summary': summary,\n 'location': location,\n 'description': description,\n 'start': {\n 'dateTime': start_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n 'timeZone': 'America/New_York',\n },\n 'end': {\n 'dateTime': end_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n 'timeZone': 'America/New_York',\n },\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n print(summary + \" created successfully\")\n return service.events().insert(calendarId='primary', body=event).execute()\n\n\ndef get_first_event(service):\n result = service.calendarList().list().execute()\n calendar_id = result['items'][0]['id']\n result = service.events().list(calendarId=calendar_id, timeZone=\"America/New_York\").execute()\n print(result['items'][0])\n\n\ndef main():\n service = get_calendar_service()\n summary = sys.argv[1]\n start_time = sys.argv[2]\n duration = int(sys.argv[3])\n create_event(service, start_time, summary, duration)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"snguyen10/GoogleCalendarAPIExplore","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"32881239595","text":"\ndebuglog(\"Reading the Android build definition...\")\ndefault = module(\"default\") # TODO: move to build.py\ntarget = module(\"targets\") # TODO: move to build.py\n\nplatform_attributes += [\"android\", \"linux-arm\"]\nplatform_binary_name = \"kanzi\"\ndefault.type = \"so\"\nplatform_default_type = \"so\" # TODO: redesign / remove / fix before release\n\nif \"ES1\" in used_profiles:\n target_device = \"android-4\"\n es_version = \"ES1\"\nelse:\n target_device = \"android-8\"\n es_version = \"ES2\"\n\n \n# paths on the build system \n\n# try to find the ndk_path from environment variables\n# look for NDK_PATH and after that search PATH\nfrom os import environ\nndk_path = None\nif \"NDK_PATH\" in environ:\n ndk_path = environ[\"NDK_PATH\"] \nif not ndk_path and \"PATH\" in environ:\n for p in environ[\"PATH\"].split(\":\"):\n if \"android-ndk\" in p:\n ndk_path = p\n break\nif not ndk_path: \n ndk_path = \"/usr/sdk/android/android-ndk-r5b\"\n warning(\"NDK_PATH is not defined, assuming \" + ndk_path + \". 
Please export NDK_PATH=\")\n \nsdk_tool_path = ndk_path + \"/toolchains/arm-eabi-4.4.0/prebuilt/linux-x86\"\n#sdk_tool_path = ndk_path_r4b + \"/build/prebuilt/linux-x86/arm-eabi-4.4.0\"\n\nsdk_platform_path = ndk_path + \"/platforms/\" + target_device + \"/arch-arm\"\n#sdk_platform_path = ndk_path_r4b + \"/build/platforms/\" + target_device + \"/arch-arm\"\n\n\n# Toolchain\n\ndefault.env[\"CC\"] = default.env[\"SHCC\"] = sdk_tool_path + \"/bin/arm-eabi-gcc\"\ndefault.env[\"CXX\"] = default.env[\"SHCXX\"] = sdk_tool_path + \"/bin/arm-eabi-g++\"\ndefault.env[\"AR\"] = sdk_tool_path + \"/bin/arm-eabi-ar\"\n\n\n# Compiler and linker flags, libraries etc.\n\ndefault.env[\"CPPDEFINES\"] += [\"ANDROID\"]\n#profile(\"release\").env[\"CFLAGS\"].remove(\"-O2\") # TODO: should there be -O? in release? Not with -g anyway.\n\ndefault.env[\"CPPPATH\"] += [sdk_platform_path + \"/usr/include\"]\ndefault.env[\"LIBPATH\"] += [sdk_platform_path + \"/usr/lib\"]\ndefault.env[\"LIBS\"] += [\"log\", \"c\", \"m\", \"dl\"]\nlibrary(\"opengl_es_1_1\").binaries += [\"GLESv1_CM\"]\nlibrary(\"opengl_es_2_0\").binaries += [\"GLESv2\"]\n\n# \"-g Produce debugging information in the operating system's native format\"\n# ndk-build does it, however, so it maybe should included? TODO: test\ndefault.env[\"CCFLAGS\"] += [\n \"-fpic\", \"-mthumb-interwork\", \"-fstack-protector\", \"-fno-short-enums\", \\\n \"-Wno-psabi\", \"-Wno-long-long\", \"-Wno-comment\", \\\n \"-D__ARM_ARCH_5__\", \"-D__ARM_ARCH_5T__\", \"-D__ARM_ARCH_5E__\", \"-D__ARM_ARCH_5TE__\"\n #\"-march=armv5te\", \"-mtune=xscale\", \"-msoft-float\", \"-mthumb\" \\\n #\"-fomit-frame-pointer\", \"-fno-strict-aliasing\", \"-finline-limit=64\"\n #\"-g\"\n]\ndefault.env[\"CXXFLAGS\"] += [\"-fno-exceptions\", \"-fno-rtti\"]\nprofile(\"release\").env[\"CCFLAGS\"] += [\"-ffunction-sections\", \"-funwind-tables\"]\n\ndefault.env[\"SHLINKFLAGS\"] += [\n \"-nostdlib\",\n \"-Wl,--no-undefined\", \"-Wl,-z,noexecstack\", \"-Wl,-Bsymbolic\", \"-fno-short-enums\", \n sdk_tool_path + \"/lib/gcc/arm-eabi/4.4.0/libgcc.a\" \n # \"-Wl,-rpath-link=\" + sdk_platform_path + \"/usr/lib\",\n # sdk_platform_path + \"/usr/lib/libc.so\",\n # sdk_platform_path + \"/usr/lib/libstdc++.so\",\n # sdk_platform_path + \"/usr/lib/libm.so\",\n # sdk_platform_path + \"/lib/gcc/arm-eabi/4.4.0/libgcc.a\",\n]\n\n# Android NDK headers make -ansi flip out, so remove them from source builds\ntry:\n if \"core\" in available_modules and \"user\" in available_modules:\n module(\"core\").env[\"CCFLAGS\"].remove(\"-ansi\")\n module(\"user\").env[\"CCFLAGS\"].remove(\"-ansi\")\nexcept ValueError:\n pass\n\n \n# mechanisms that are needed to make APK building happen without project-specific scripts\n\ntarget.install_dir = \"libs/armeabi\"\ntarget.preactions += [\n #\"android update project -p . -t \" + target_device\n \"/usr/sdk/android/android-sdk-linux_86/tools/android update project -p . -t \" + target_device\n #\"cp GLES_profiles/\" + es_version + \"/AndroidManifest.xml .\",\n #\"cp GLES_profiles/\" + es_version + \"/KanziView.java src/com/rightware/kanzi\"\n #\"ant use_es1\"\n]\ntarget.profile(\"debug\").postactions += [\"ant debug\"]\ntarget.profile(\"profile\").postactions += [\"ant debug\"]\ntarget.profile(\"release\").postactions += [\"ant release\"]\n\n# TODO: temporary hack, won't run ant (or Install?) 
if this file already exists\n# also ensure install dir exists\n# Bad stuff: may slow down Hudson, for example; forces relinkage of .so (not really that bad)\nfrom os import system\ndebuglog(\"Cleaning the .so file\")\nsystem(\"mkdir -p libs/armeabi\")\nsystem(\"rm -f libs/armeabi/*.so\")\n\n\ndel default\ndel target\n","repo_name":"NickTompkins123/perf_tester","sub_path":"app/jobs/basemarkcl/bench/basemarkcl_gold_source/source_package/source/Kanzi/Engine/configs/platforms/android/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"24917976104","text":"#!/usr/bin/python3\n\nimport itertools\n\ndef AND(b1, b2):\n return b1 & b2\n\ndef OR(b1, b2):\n return b1 | b2\n\ndef XOR(b1, b2):\n return b1 ^ b2\n\nop = [\"AND\", \"OR\", \"XOR\"]\nl = [[op1, op2, op3, op4, op5] for op1 in op for op2 in op for op3 in op for op4 in op for op5 in op]\ncircuits = list()\n\n# There are only 5 types of circuits with 5 operators and 8 input bits\n# (Considering input bits can only be used once):\n# - Starting with OP1(OP2(a,b),OP3(c,d)):\n# * OP1(OP2(OP4(a,b),OP5(c,d)),OP3(e,f))\n# * OP1(OP2(OP4(a,b),c),OP3(OP5(d,e),f))\n# * OP1(OP2(OP4(OP5(a,b),c),d),OP3(e,f))\n# - Starting with OP1(a,OP2(b,OP3(c,d)):\n# * OP1(a,OP2(b,OP3(c,OP4(d,OP5(e,f)))))\n# * OP1(a,OP2(b,OP3(OP4(c,d),OP5(e,f))))\n# With (a,b,c,d,e,f) 6 of the 8 the input bits\n\n# We need list comprehensions for:\n# - 4bits among 8\n# - 2bits among 8\n_8bits = ['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7']\n_4bits_l = list(itertools.combinations(_8bits, 4))\n_2bits_l = list(itertools.combinations(_8bits, 2))\n\n# List of all circuits of type OP1(OP2(OP4(a,b),OP5(c,d)),OP3(e,f))\nfor x in l:\n for _4bits in _4bits_l:\n _4bits_1 = [x for x in _8bits if x not in _4bits]\n _2bits_ll = list(itertools.combinations(_4bits_1, 2))\n for _2bits in _2bits_ll:\n a = _4bits[0]; b = _4bits[1]; c = _4bits[2]; d = _4bits[3]\n e = _2bits[0]; f = _2bits[1]\n circuits.append(x[0]+'('+x[1]+'('+x[2]+'('+a+','+b+'),'+x[3]+'('+c+','+d+')),'+x[4]+'('+e+','+f+'))')\n\n# List of all circuits of type OP1(OP2(OP4(a,b),c),OP3(OP5(d,e),f))\nfor x in l:\n for _4bits in _4bits_l:\n _4bits_1 = [x for x in _8bits if x not in _4bits]\n _2bits_ll = list(itertools.combinations(_4bits_1, 2))\n for _2bits in _2bits_ll:\n a = _4bits[0]; b = _4bits[1]; d = _4bits[2]; e = _4bits[3]\n c = _2bits[0]; f = _2bits[1]\n circuits.append(x[0]+'('+x[1]+'('+x[2]+'('+a+','+b+'),'+c+'),'+x[3]+'('+x[4]+'('+d+','+e+'),'+f+'))')\n c = _2bits[1]; f = _2bits[0]\n circuits.append(x[0]+'('+x[1]+'('+x[2]+'('+a+','+b+'),'+c+'),'+x[3]+'('+x[4]+'('+d+','+e+'),'+f+'))')\n\n# List of all circuits of type OP1(OP2(OP4(OP5(a,b),c),d),OP3(e,f))\nfor x in l:\n for _2bits_1 in _2bits_l:\n for _2bits_2 in _2bits_l:\n _4bits_1 = [x for x in _8bits if x not in _2bits_1 if x not in _2bits_2]\n _2bits_ll = list(itertools.combinations(_4bits_1, 2))\n for _2bits_3 in _2bits_ll:\n a = _2bits_1[0]; b = _2bits_1[1]; e = _2bits_2[0]; f = _2bits_2[1]\n c = _2bits_3[0]; d = _2bits_3[1]\n circuits.append(x[0]+'('+x[1]+'('+x[2]+'('+x[3]+'('+a+','+b+'),'+c+'),'+d+'),'+x[4]+'('+e+','+f+'))')\n c = _2bits_3[1]; d = _2bits_3[0]\n circuits.append(x[0]+'('+x[1]+'('+x[2]+'('+x[3]+'('+a+','+b+'),'+c+'),'+d+'),'+x[4]+'('+e+','+f+'))')\n\n# List of all circuits of type OP1(a,OP2(b,OP3(c,OP4(d,OP5(e,f)))))\nfor x in l:\n for _2bits in _2bits_l:\n e = _2bits[0]; f = _2bits[1];\n _6bits_1 = [x for x in _8bits if x not 
in _2bits]\n _4bits_ll = list(itertools.combinations(_6bits_1, 4))\n for _4bits in _4bits_ll:\n a = _4bits[0]; b = _4bits[1]; c = _4bits[2]; d = _4bits[2]\n circuits.append(x[0]+'('+a+','+x[1]+'('+b+','+x[2]+'('+c+','+x[3]+'('+d+','+x[4]+'('+e+','+f+')))))')\n\n# List of all circuits of type OP1(a,OP2(b,OP3(OP4(c,d),OP5(e,f))))\nfor x in l:\n for _4bits in _4bits_l:\n c = _4bits[0]; d = _4bits[1]; e = _4bits[2]; f = _4bits[3]\n _4bits_1 = [x for x in _8bits if x not in _4bits]\n _2bits_ll = list(itertools.combinations(_4bits_1, 2))\n for _2bits in _2bits_ll:\n a = _2bits[0]; b = _2bits[1]\n circuits.append(x[0]+'('+a+','+x[1]+'('+b+','+x[2]+'('+x[3]+'('+c+','+d+'),'+x[4]+'('+e+','+f+'))))')\n a = _2bits[1]; b = _2bits[0]; \n circuits.append(x[0]+'('+a+','+x[1]+'('+b+','+x[2]+'('+x[3]+'('+c+','+d+'),'+x[4]+'('+e+','+f+'))))')\n\ncircuits0 = circuits1 = circuits2 = circuits3 = circuits\ncircuits4 = circuits5 = circuits6 = circuits7 = circuits\n\n# Read input file\ncount = 0\nwith open('inputs_outputs.txt', 'r') as f:\n for line in f:\n count += 1\n print()\n print(\"##\", count)\n _in = int(line.split()[0], 2)\n _out = int(line.split()[2], 2)\n\n # Extract individual bits from input value\n b0 = (_in >> 7) & 1; b1 = (_in >> 6) & 1; b2 = (_in >> 5) & 1; b3 = (_in >> 4) & 1\n b4 = (_in >> 3) & 1; b5 = (_in >> 2) & 1; b6 = (_in >> 1) & 1; b7 = (_in >> 0) & 1\n\n # Extract individual bits from output value\n out0 = (_out >> 7) & 1; out1 = (_out >> 6) & 1; out2 = (_out >> 5) & 1; out3 = (_out >> 4) & 1\n out4 = (_out >> 3) & 1; out5 = (_out >> 2) & 1; out6 = (_out >> 1) & 1; out7 = (_out >> 0) & 1\n\n # Loop on all generated circuits to match the output bit0\n # Build a new list of circuits matching output bit0\n circuits_match0 = list()\n for c in circuits0:\n if (out0 == eval(c)):\n circuits_match0.append(c)\n circuits0 = circuits_match0\n print(\"b0 done: \", len(circuits0))\n\n # Loop on all generated circuits to match the output bit1\n # Build a new list of circuits matching output bit1\n circuits_match1 = list()\n for c in circuits1:\n if (out1 == eval(c)):\n circuits_match1.append(c)\n circuits1 = circuits_match1\n print(\"b1 done: \", len(circuits1))\n\n # Loop on all generated circuits to match the output bit2\n # Build a new list of circuits matching output bit2\n circuits_match2 = list()\n for c in circuits2:\n if (out2 == eval(c)):\n circuits_match2.append(c)\n circuits2 = circuits_match2\n print(\"b2 done: \", len(circuits2))\n\n # Loop on all generated circuits to match the output bit3\n # Build a new list of circuits matching output bit3\n circuits_match3 = list()\n for c in circuits3:\n if (out3 == eval(c)):\n circuits_match3.append(c)\n circuits3 = circuits_match3\n print(\"b3 done: \", len(circuits3))\n\n # Loop on all generated circuits to match the output bit4\n # Build a new list of circuits matching output bit4\n circuits_match4 = list()\n for c in circuits4:\n if (out4 == eval(c)):\n circuits_match4.append(c)\n circuits4 = circuits_match4\n print(\"b4 done: \", len(circuits4))\n\n # Loop on all generated circuits to match the output bit5\n # Build a new list of circuits matching output bit5\n circuits_match5 = list()\n for c in circuits5:\n if (out5 == eval(c)):\n circuits_match5.append(c)\n circuits5 = circuits_match5\n print(\"b5 done: \", len(circuits5))\n\n # Loop on all generated circuits to match the output bit6\n # Build a new list of circuits matching output bit6\n circuits_match6 = list()\n for c in circuits6:\n if (out6 == eval(c)):\n circuits_match6.append(c)\n 
circuits6 = circuits_match6\n print(\"b6 done: \", len(circuits6))\n\n # Loop on all generated circuits to match the output bit7\n # Build a new list of circuits matching output bit7\n circuits_match7 = list()\n for c in circuits7:\n if (out7 == eval(c)):\n circuits_match7.append(c)\n circuits7 = circuits_match7\n print(\"b7 done: \", len(circuits7))\n\n# We might have multiple circuits to give the same output bit for a given entry\n# Pick up the first one for each bit\ncircuit = [circuits0[0], circuits1[0], circuits2[0], circuits3[0], circuits4[0], circuits5[0], circuits6[0], circuits7[0]]\n\n# Re-read input file, to remove the already known values when generating all the values\nknown_values = list()\nwith open('inputs_outputs.txt', 'r') as f:\n for line in f:\n known_values.append(int(line.split()[0], 2))\n\n# Finaly generate all values with the circuit we found, excluding the already known values\nwith open(\"suite.txt\", \"w\") as f:\n for val in range(1, 256):\n if val not in known_values:\n # Extract individual bits from input value\n b0 = (val >> 7) & 1; b1 = (val >> 6) & 1; b2 = (val >> 5) & 1; b3 = (val >> 4) & 1\n b4 = (val >> 3) & 1; b5 = (val >> 2) & 1; b6 = (val >> 1) & 1; b7 = (val >> 0) & 1\n # Compute & reassemble the ouput\n out = (eval(circuit[0]) << 7) | (eval(circuit[1]) << 6) | (eval(circuit[2]) << 5) | (eval(circuit[3]) << 4) | (eval(circuit[4]) << 3) | (eval(circuit[5]) << 2) | (eval(circuit[6]) << 1) | eval(circuit[7])\n # Generate expected string: : \n f.write(bin(val)[2:].zfill(8) + \" : \" + bin(out)[2:].zfill(8) + \"\\n\")\n","repo_name":"Thesee-bzh/ECW-2022","sub_path":"Hardware/gates_3/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":8892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"2015461367","text":"import threading\nimport RPi.GPIO as GPIO\n#import alexalightson\n\nGPIO.setmode(GPIO.BOARD)\nbuttonPin = 29\nGPIO.setup(buttonPin, GPIO.IN, pull_up_down = GPIO.PUD_UP)\nisPartyModeOn = False\n\ndef buttonPressed():\n global isPartyModeOn\n \n #print(buttonState)\n if GPIO.input(buttonPin) == False:\n print(\"presseddddddddddddddd\")\n if isPartyModeOn == False:\n print(\"Party mode started\")\n isPartyModeOn = True\n\n #startPartyMode()\n else:\n #lightsOn()\n print(\"Lights on\")\n isPartyModeOn = False\n threading.Timer(0.1, buttonPressed).start()\n\n\n\n\n\n \n\nbuttonPressed()\n","repo_name":"colinwarn/alexaraspberrypilightautomation","sub_path":"py/buttonPressedScriptTest.py","file_name":"buttonPressedScriptTest.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34771179709","text":"\"\"\"\nDice Roller Widget\n\"\"\"\n\nfrom tkinter import *\nimport random\n\nresult_r1 = 1\n\nd4 = [1, 2, 3, 4]\nd6 = [1, 2, 3, 4, 5, 6]\nd8 = [1, 2, 3, 4, 5, 6, 7, 8]\nd10 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nd12 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\nd20 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\nd100 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,\n 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,\n 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,\n 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n\n\ndef roll_d4():\n # rolls 4-sided die\n r1.delete(1.0)\n 
d4_result = random.choice(d4)\n r1.insert(END, d4_result)\n\n\ndef roll_d6():\n r1.delete(1.0)\n d6_result = random.choice(d6)\n r1.insert(END, d6_result)\n\n\ndef roll_d8():\n r1.delete(1.0)\n d8_result = random.choice(d8)\n r1.insert(END, d8_result)\n\n\ndef roll_d10():\n r1.delete(1.0)\n d10_result = random.choice(d10)\n r1.insert(END, d10_result)\n\n\ndef roll_d12():\n r1.delete(1.0)\n d12_result = random.choice(d12)\n r1.insert(END, d12_result)\n\n\ndef roll_d20():\n r1.delete(1.0)\n d20_result = random.choice(d20)\n r1.insert(END, d20_result)\n\n\ndef roll_d100():\n r1.delete(1.0)\n d100_result = random.choice(d100)\n r1.insert(END, d100_result)\n\n\n# Called when radio button clicked\ndef radio_used():\n sides = radio_state.get()\n return sides\n\n\n# BUTTON: Roll Dice\ndef roll():\n r1.delete(1.0, END)\n sides = radio_used()\n if sides == 4:\n roll_d4()\n elif sides == 6:\n roll_d6()\n elif sides == 8:\n roll_d8()\n elif sides == 10:\n roll_d10()\n elif sides == 12:\n roll_d12()\n elif sides == 20:\n roll_d20()\n elif sides == 100:\n roll_d100()\n\n\n# Create a new window and configurations\nwindow = Tk()\nwindow.title(\"Dice Roller Widget\")\nwindow.minsize(width=300, height=500)\n\n# LABEL: Number of sides\nnum_of_sides = Label(text=\"Number of Sides\")\nnum_of_sides.grid(column=0, row=2)\n\n# RADIO BUTTONS: 4, 6, 8, 10, 12, 20, 100\n# Variable to hold on to which radio button value is checked.\nradio_state = IntVar()\nradiobutton1 = Radiobutton(text=\"4-sided\", value=4, variable=radio_state, command=radio_used)\nradiobutton2 = Radiobutton(text=\"6-sided\", value=6, variable=radio_state, command=radio_used)\nradiobutton3 = Radiobutton(text=\"8-sided\", value=8, variable=radio_state, command=radio_used)\nradiobutton4 = Radiobutton(text=\"10-sided\", value=10, variable=radio_state, command=radio_used)\nradiobutton5 = Radiobutton(text=\"12-sided\", value=12, variable=radio_state, command=radio_used)\nradiobutton6 = Radiobutton(text=\"20-sided\", value=20, variable=radio_state, command=radio_used)\nradiobutton7 = Radiobutton(text=\"100-sided\", value=100, variable=radio_state, command=radio_used)\nradiobutton1.grid(column=0, row=3)\nradiobutton2.grid(column=0, row=4)\nradiobutton3.grid(column=0, row=5)\nradiobutton4.grid(column=0, row=6)\nradiobutton5.grid(column=0, row=7)\nradiobutton6.grid(column=0, row=8)\nradiobutton7.grid(column=0, row=9)\n\n# Button calls roll() when pressed\nroll_button = Button(text=\"Roll Dice\", command=roll)\nroll_button.grid(column=0, row=10)\n\n# LABEL: Results (1,0)\nresult = Label(text=\"Roll Result\")\nresult.grid(column=1, row=0)\nresult.config(padx=50, pady=0)\n\n# TEXT BOX\nr1 = Text(height=1, width=3)\nr1.grid(column=1, row=1)\nr1.config(padx=50, pady=0)\n\n\nwindow.mainloop()\n","repo_name":"Data-Curry/Dice_Roller_GUI_Widget","sub_path":"dice_roller_widget.py","file_name":"dice_roller_widget.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"17521519821","text":"from music21 import *\n\ndef calculate_bpm(file_path):\n # Load the MIDI file\n midi_stream = converter.parse(file_path)\n \n # Estimate the BPM using the first part of the stream (assuming a single tempo for simplicity)\n first_part = midi_stream.parts[0]\n bpm_estimate = first_part.getElementsByClass('MetronomeMark')[0].getQuarterBPM()\n \n return bpm_estimate\n\n# Specify the path to your MIDI file\nmidi_file_path = 'path_to_your_midi_file.mid'\n\n# Calculate the BPM\nbpm = 
calculate_bpm(midi_file_path)\nprint(\"Estimated BPM:\", bpm)\n","repo_name":"OdessaHH/BPM_KEY_FINDER","sub_path":"BKF2/music21/bpm music21.py","file_name":"bpm music21.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"6370380465","text":"from Crypto.Cipher import AES\nfrom Crypto.Random import get_random_bytes\n\n\nclass Cipher:\n header = b'fastvpn'\n\n def __init__(self, key, increment=False):\n self.key = bytearray.fromhex(key)\n # Used for tcp inc iv\n self.increment = increment\n self.iv = bytearray(12)\n\n def encrypt(self, payload: bytes) -> bytes:\n if self.increment:\n nonce = self.iv\n self.iv = inc_nonce(self.iv)\n else:\n nonce = get_random_bytes(12)\n\n cipher = AES.new(self.key, AES.MODE_GCM, nonce=nonce)\n cipher.update(Cipher.header)\n ciphertext, tag = cipher.encrypt_and_digest(payload)\n\n if self.increment:\n return ciphertext + tag\n else:\n return cipher.nonce + ciphertext + tag\n \n def decrypt(self, payload) -> bytes:\n if len(payload) <= 16:\n return None\n\n if self.increment:\n nonce = self.iv\n self.iv = inc_nonce(self.iv)\n ciphertext = payload[:-16]\n else:\n nonce = payload[:12]\n ciphertext = payload[12:-16]\n\n tag = payload[-16:]\n cipher = AES.new(self.key, AES.MODE_GCM, nonce=nonce)\n cipher.update(Cipher.header)\n\n return cipher.decrypt_and_verify(ciphertext, tag)\n\n @staticmethod\n def generate_key(length: int = 16) -> str:\n assert length == 16 or length == 24 or length == 32, 'Key length must be 16, 24, 32'\n return get_random_bytes(length).hex()\n\ndef inc_nonce(bytes):\n assert len(bytes) > 1\n\n buffer = bytes[:]\n arr = list(buffer)\n\n index = len(buffer) - 1\n overflow = 1\n\n while index >= 0 and overflow == 1:\n if arr[index] >= 255:\n arr[index] = 0\n else:\n arr[index] = arr[index] + 1\n break\n index -= 1\n\n return bytearray(arr)\n\n\ndef test():\n key = '6882ea6b1a0c71a2a249f8407a56019d'\n cipher = Cipher(key)\n\n plaintext = b'helloworld'\n ciphertext = cipher.encrypt(plaintext)\n result = cipher.decrypt(ciphertext)\n\n assert(plaintext == result)\n assert(b\"hello\".hex() == \"hello\".encode().hex())\n\n n1 = bytearray([0x00, 0x00, 0x00, 0x01])\n assert(inc_nonce(b'\\x00\\x00\\x00\\x00') == b'\\x00\\x00\\x00\\x01')\n assert(inc_nonce(b'\\x00\\x00\\x00\\xff') == b'\\x00\\x00\\x01\\x00')\n assert(inc_nonce(b'\\xff\\xff\\xff\\xff') == b'\\x00\\x00\\x00\\x00')\n assert(inc_nonce(b'\\x00\\xff\\xff\\xff') == b'\\x01\\x00\\x00\\x00')\n print('test ok')\n\n cipher2 = Cipher(key, True)\n assert cipher2.encrypt(b'\\x00\\x3b').hex() == '31c83dacefffa7e78da5156768ea9b3195f1'\n\n cipher3 = Cipher(key, True)\n assert cipher3.decrypt(bytearray.fromhex('31c83dacefffa7e78da5156768ea9b3195f1')) == b'\\x00\\x3b'\n\n\nif __name__ == '__main__':\n try:\n test()\n except ValueError as e:\n if e.args[0] == 'MAC check failed':\n print('Invalid key')\n\n\n\n","repo_name":"purkylin/fast_vpn","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72530329650","text":"\nimport logging.handlers\nimport os\n\n#定义基础url:\nBASE_URL = \"http://182.92.81.159/\"\n\nCUR_PWD = os.path.abspath(__file__)\nprint(CUR_PWD)\nBASE_DIR = os.path.dirname(CUR_PWD)\nprint(BASE_DIR)\n\nTOKEN = \"29520f86-e79d-4b6b-a6e3-843d0325dc5a\"\nheaders_data = {\"Content-Type\":\"application/json\",\"Authorization\":TOKEN}\n\n\n#新建日志方法\ndef 
init_my_log():\n #创建日志器\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n #创建控制器输出器\n sh = logging.StreamHandler()\n #创建文件输出器\n file_log = BASE_DIR + \"/log/ihrm.log\"\n fh = logging.handlers.TimedRotatingFileHandler(file_log,when=\"midnight\",interval=1,backupCount=7,encoding='utf-8')\n #创建格式化器\n fmt = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s'\n formater = logging.Formatter(fmt)\n #把格式化加入输出器中\n sh.setFormatter(formater)\n fh.setFormatter(formater)\n #把输出器加入日志器\n logger.addHandler(sh)\n logger.addHandler(fh)","repo_name":"zhjytest/apiTestIHRM","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"71373453489","text":"import json\nfrom pathlib import Path\n\nfrom src.paths import PLOTS_EXPERIMENT_PATH\n\n\ndef read_config_file(file_path: str) -> (dict, list):\n with open(file_path) as config_file:\n all_experiments = json.load(config_file)\n all_experiments_names = all_experiments.keys()\n return all_experiments, all_experiments_names\n\n\ndef construct_name(config: dict, experiment_name: str) -> str:\n name = (\n f\"{experiment_name}\"\n f\"[Straddled type = asymmetric | \"\n f\"Num. Epochs = {config['num_epochs']} | Learning rate = {config['learning_rate']} | \"\n f\"Num. runs = {config['num_tests']}]\"\n )\n return name\n\n\ndef make_experiment_dir(experiment_name: str):\n path = PLOTS_EXPERIMENT_PATH + experiment_name\n path_to_check = Path(path)\n if not path_to_check.is_dir():\n path_to_check.mkdir(parents=True, exist_ok=False)\n print(f\"Folder created for {path_to_check}\")\n\n\ndef make_missing_dir(experiment_name: str):\n path = experiment_name\n path_to_check = Path(path)\n if not path_to_check.is_dir():\n path_to_check.mkdir(parents=True, exist_ok=False)\n print(f\"Folder created for {path_to_check}\")\n\n\ndef print_one_run_time(start_time: float, end_time: float, name: str):\n print(\n f'{\"-\" * 20}\\n{round((end_time - start_time) / 60, 3)} minutes for 1 run of:\\n{name}\\n{\"-\" * 20}\\n'\n )\n\n\ndef print_total_time(all_start_time: float, all_end_time: float, dataset_name: str):\n print(\n f'{\"-\" * 20}\\n{round((all_end_time - all_start_time) / 60, 3)} minutes for all {dataset_name} experiments'\n )\n","repo_name":"artefactory-uk/autoencoder-paper","sub_path":"src/experiment_scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20819185446","text":"\ndef textlog(log_name: str = \"error_log\", path: str = \".\") -> object:\n \"\"\"\n Decorator responsable to save a log.txt file contaning he error of the funcition\n\n Args:\n log_name (str, optional): Name of the log file. Defaults to \"error_log\".\n path (str, optional): path of the log file. 
Defaults to \".\".\n \"\"\"\n def higher_wrapper(func: object) -> object:\n def wrapper(*args, **kwargs):\n from traceback import format_exc\n try:\n func(*args, **kwargs)\n \n except Exception:\n error = format_exc()\n with open(f\"{path}/{log_name}.txt\", \"wt\") as file:\n file.writelines(error) \n return wrapper\n return higher_wrapper\n\ndef time_counter(re_run: int):\n \"\"\"\n Decorator para contar o tempo de execução de uma função.\n\n Args:\n re_run (int): Número de vezes que a função irá ser testada\n\n Returns:\n object: função original com a média de tempo entre o total de re_run\n \"\"\"\n from statistics import mean\n from time import perf_counter\n \n def higher_wrapper(function: object) -> object:\n def wrapper(*args, **kwargs) -> object:\n \n count = []\n for i in range(re_run):\n start = perf_counter()\n func = function(*args, **kwargs)\n end = perf_counter()\n count.append(end - start)\n \n time = mean(count)\n \n print(f\"Tempo de execução da função ({i + 1} re-runs): {time} segundos.\")\n return func \n return wrapper\n return higher_wrapper\n\ndef scroll_to_element(times:int=10) -> object:\n \"\"\"\n Decorator para que, em caso de certeza que o elemento HTML está na página, usar a barra de scroll\n até o elemento ficar visível.\n\n Args:\n times (int, optional): Quantidade de vezes que usará a barra de scroll. Defaults to 10.\n\n Returns:\n object: Elemento do selenium ou None caso não ache nada\n \"\"\"\n from selenium.common.exceptions import ElementClickInterceptedException, StaleElementReferenceException, TimeoutException\n from selenium.webdriver import Chrome\n def higher_wrapper(funcion: object) -> object:\n \n def wrapper(driver: Chrome):\n element = None\n i = 0\n while i <= times:\n driver.execute_script(f\"window.scrollTo(0, 1)\")\n try:\n driver.execute_script(f\"window.scrollTo(0, {100*i})\")\n \n element = funcion(driver) \n i += times\n \n except (ElementClickInterceptedException, StaleElementReferenceException, TimeoutException):\n i += 1\n if element == None:\n raise TypeError(\"Selenium Element not found!\")\n \n return element \n return wrapper\n return higher_wrapper\n\ndef merge_list(*args, unique: bool = False, tupla: bool = False) -> list:\n \"\"\"\n Função para mesclar/concatonat listas.\n\n Returns:\n list: Lista com o resultado da mesclagem\n \"\"\"\n data = []\n for arg in args:\n data += arg\n \n if unique: \n data = list(set(data))\n if tupla:\n data = tuple(data)\n \n return data\n\nclass Encrypt:\n \n def __init__(self, language: dict = None):\n \n import random\n \n letter_lower = \"abcdefghijklmnopqrstuvwxyz\"\n letter_upper = letter_lower.upper()\n other_letters_lower = \"çáãéêâîíõôóúàèìòùú'\"\n other_letters_upper = other_letters_lower.upper()\n numbers = \"0123456789\"\n symbols = r'\\/?°´ª[{]}-+=§!@#$%¨&*()$£¢¬º^~:;.<>,| \"'\n\n characteres = letter_lower + letter_upper + numbers + symbols + other_letters_lower + other_letters_upper\n \n if language:\n self.encripytion_language = language\n else:\n self.encripytion_language = {key:\" \" for key in characteres }\n \n for value in self.encripytion_language: \n while True:\n letter = \"\".join(random.choices(characteres, k=2))\n if letter not in self.encripytion_language.values():\n self.encripytion_language[value] = letter\n break\n self.encripytion_language.update({\"\\n\": \"\\n\"+ random.choice(characteres)}) \n \n def encrypt(self, message: str) -> str: \n \"\"\"\n Encripta a mensagem inserida pelo usuário\n\n Args:\n message (str): mensagem original\n\n Returns:\n str: mensagem 
encriptada de acordo com a linguagem definida para essa classe\n \"\"\"\n \n encrypted_message_list = [self.encripytion_language[letter] for letter in message]\n \n return \"\".join(encrypted_message_list)\n\n def decrypt(self, message: str) -> str:\n \"\"\"\n Decripta a mensagem inserida pelo usuário\n\n Args:\n message (str): mensagem encripdada para decriptar\n\n Raises:\n ValueError: levanta erro caso alguma letra do texto encriptadonão esteja na lista de letras da linuagem definida na classe\n\n Returns:\n str: mensagem decriptada de acordo com a linguagem definida para essa classe\n \"\"\"\n \n original, encrypted = [value for value in self.encripytion_language.keys()], [value for value in self.encripytion_language.values()]\n encrypted_message_list = [message[i:i + 2] for i in range(0, len(message)) if i%2 == 0]\n\n decrypted_message_list = []\n for value in encrypted_message_list:\n try:\n index = encrypted.index(value)\n except ValueError:\n raise ValueError(\"Letter not on list, you are not using correct language!\")\n decrypted_message_list.append(original[index])\n \n return \"\".join(decrypted_message_list)\n \n def __str__(self):\n print(\"Language dict {original character: encrypted character}: \",self.encripytion_language)\n \ndef verify_string(list_to_verify: list[str], string_to_verify: str) -> bool:\n \"\"\"\n Function to verify if any of the values in the list is on string\n\n Args:\n list_to_verify (list): List of strings to verify\n string_to_verify (str): string to compare\n\n Returns:\n bool: True if any name of the list is in string, False otherwise.\n \"\"\"\n boolean = False\n for value in list_to_verify:\n if value in string_to_verify:\n boolean = True\n return boolean\n\ndef is_word_similar(word:str, word_to_compare: str) -> bool:\n \"\"\"\n Function to detect if word is similar based on parameters\n\n Args:\n word (str): original word \n word_to_compare (str): word to be compared\n\n Returns:\n bool: True if word is similar, False otherwise\n \"\"\"\n count = []\n if len(word_to_compare) - len(word) < 3:\n for letter, letter_to_compare in zip(word, word_to_compare):\n if letter_to_compare == letter:\n count.append(1)\n else:\n count.append(0.1)\n return True if sum(count) > len(word_to_compare)/1.2 else False\n\n return False\n\n","repo_name":"Raimundo-Silva-Junior/nicetools","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"33790115364","text":"import django_filters\n\nfrom .models import *\n\n#Para incluir or\nfrom django.db.models import Q\n\nclass ProvFilTer(django_filters.FilterSet):\n qs = django_filters.CharFilter(method='filter_qs')\n\n def filter_qs(self, qs, name, value):\n return qs.filter(\n Q(nombre__icontains=value) | Q(descripcion__icontains=value)\n )\n\nclass ProveedorFilter(django_filters.FilterSet):\n nombre = django_filters.CharFilter(lookup_expr='icontains', label=' Nombre')\n descripcion = django_filters.CharFilter(lookup_expr='icontains', label=' Descripción')\n contacto = django_filters.CharFilter(lookup_expr='icontains', label=' Contacto')\n\n class meta:\n model = m_proveedor\n fields = ['nombre', 'descripcion', 'contacto']\n \n #def filter_qs(self, qs, name, value):\n # return qs.filter(\n # Q(nombre__icontains=value) | Q(descripcion__icontains=value)\n # )\n #groups = [\n # CombinedGroup(filters=['first_name', 'last_name'], combine=operator.or_),\n 
#]","repo_name":"TonyEspinosa/SEFOET2","sub_path":"apps/proveedor/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"1120511726","text":"#!/usr/bin/env ipython\n# coding: utf-8\n\nimport tensorflow as tf\nimport numpy as np\n\n\nclass foo(object):\n pass\n\n\nFLAGS = foo()\nFLAGS.path = 'train.TFRecord'\nFLAGS.batch_size = 16\nFLAGS.capacity = 512\nFLAGS.threads = 8\n\n\ndef parse_example(filename_queue):\n # Define how to parse the example\n\n reader = tf.TFRecordReader()\n _, example = reader.read(filename_queue)\n\n context_features = {\n 'length': tf.FixedLenFeature([], dtype=tf.int64),\n 'label': tf.FixedLenFeature([], dtype=tf.int64)\n }\n sequence_features = {\n 'data': tf.FixedLenSequenceFeature([], dtype=tf.float32)\n }\n con_parsed, seq_parsed = tf.parse_single_sequence_example(\n serialized=example,\n context_features=context_features,\n sequence_features=sequence_features\n )\n res = (seq_parsed['data'],\n con_parsed['length'],\n con_parsed['label'])\n return res\n\n\ndef get_batch_producer(\n path=FLAGS.path,\n batch_size=FLAGS.batch_size,\n prefetch_size=FLAGS.capacity,\n num_of_threads=FLAGS.threads,\n scope='batch_producer'):\n\n if isinstance(path, str):\n path = [path]\n with tf.name_scope(scope):\n filename_queue = tf.train.string_input_producer(\n path, name='filename_producer', shuffle=True)\n with tf.name_scope('example_producer'):\n data, seq_len, label = parse_example(filename_queue)\n data = tf.placeholder_with_default(data, [None], name='data')\n label = tf.cast(label, tf.int32, name='label')\n seq_len = tf.cast(seq_len, tf.int32, name='seq_length')\n\n # THIS IS STILL NOT AVAILABLE IN TENSORFLOW\n # https://github.com/tensorflow/tensorflow/issues/5147\n # shuffle batch with dynamic padding... duh... 
no workaround\n # ValueError: All shapes must be fully defined:\n # [TensorShape([Dimension(None)]), TensorShape([]), TensorShape([])]\n \"\"\"with tf.name_scope('shuffle_batch_producer'):\n q = tf.RandomShuffleQueue(\n capacity=2*prefetch_size,\n min_after_dequeue=prefetch_size,\n dtypes=[tf.float32, tf.int32, tf.int32],\n shapes=[[None], [], []], name='shuffle_queue')\n\n enqueue_op = q.enqueue([data, seq_len, label], name='push_single_example')\n qr = tf.train.QueueRunner(q, [enqueue_op]*num_of_threads)\n tf.train.add_queue_runner(qr)\n batch_op = q.dequeue_many(n=batch_size, name='pop_batch')\n \"\"\"\n\n batch_op = tf.train.batch(\n [data, seq_len, label],\n batch_size,\n num_of_threads,\n prefetch_size,\n shapes=[[None], [], []],\n dynamic_pad=True,\n name='padded_batch_queue')\n\n \"\"\"if shuffle:\n batch_op = tf.train.shuffle_batch(\n [data],\n batch_size,\n capacity=5*prefetch_size,\n min_after_dequeue=3*prefetch_size,\n num_threads=num_of_threads,\n enqueue_many=False,\n #shapes=[[None], [], []],\n name='shuffle_batch_producer')\n \"\"\"\n\n \"\"\"with tf.name_scope('padded_batch_producer'):\n q = tf.PaddingFIFOQueue(\n capacity=prefetch_size,\n dtypes=[tf.float32, tf.int32, tf.int32],\n shapes=[[None], [], []], name='padding_queue')\n\n enqueue_op = q.enqueue([data, seq_len, label], name='push_single_example')\n qr = tf.train.QueueRunner(q, [enqueue_op]*num_of_threads)\n tf.train.add_queue_runner(qr)\n batch_op = q.dequeue_many(n=batch_size, name='pop_batch')\n \"\"\"\n return batch_op\n\n\ndef get_even_batch_producer(paths,\n batch_size=FLAGS.batch_size,\n prefetch_size=FLAGS.capacity,\n num_of_threads=FLAGS.threads):\n\n tf.assert_greater_equal(\n batch_size, len(paths), data=[batch_size, len(paths)],\n message='batch_size must be greater than the number of classes')\n\n sub_batch_size = batch_size // len(paths)\n input_prods = []\n for path in paths:\n input_prods.append(get_batch_producer(\n path,\n batch_size=sub_batch_size,\n prefetch_size=FLAGS.capacity,\n num_of_threads=FLAGS.threads,\n scope='producer_%s' % path\n ))\n\n '''batch_op = tf.train.batch_join(\n list(zip(*input_prods)),\n batch_size,\n #num_of_threads,\n capacity=prefetch_size,\n shapes=[[None], [], []],\n dynamic_pad=True,\n enqueue_many=True,\n name='even_batch_producer')'''\n\n with tf.name_scope('even_batch_producer'):\n q = tf.PaddingFIFOQueue(\n capacity=prefetch_size,\n dtypes=[tf.float32, tf.int32, tf.int32],\n shapes=[[None], [], []], name='padding_even_queue')\n\n for data, seq_len, label in input_prods:\n enqueue_op = q.enqueue_many(\n [data, seq_len, label], name='push_many_example_of_class')\n qr = tf.train.QueueRunner(q, [enqueue_op] * num_of_threads)\n tf.train.add_queue_runner(qr)\n\n batch_op = q.dequeue_many(n=batch_size, name='pop_batch')\n\n return batch_op, input_prods\n","repo_name":"botcs/cinc2017","sub_path":"data/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"33871485000","text":"from datetime import timedelta\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom src import crud, schemas\nfrom src.core.dependencies import (\n ACCESS_TOKEN_EXPIRE_MINUTES,\n create_access_token,\n get_db,\n)\n\nrouter = APIRouter()\n\n\n@router.post(\"/client/register\", tags=[\"customer\"])\nasync def register(client: schemas.Client, db: Session = Depends(get_db)):\n db_user = crud.get_client(db, 
phone=client.phone)\n if db_user:\n raise HTTPException(status_code=201, detail=\"Sucess\")\n else:\n crud.create_client(db, client.phone)\n raise HTTPException(status_code=201, detail=\"Sucess\")\n raise HTTPException(status_code=400, detail=\"Acess denied\")\n\n\n@router.post(\n \"/client/login\",\n response_model=schemas.Token,\n status_code=status.HTTP_201_CREATED,\n tags=[\"customer\"],\n)\nasync def login(client: schemas.AuthenticationMobile, db: Session = Depends(get_db)):\n db_user = crud.get_client(db, phone=client.phone)\n if not db_user:\n raise HTTPException(status_code=400, detail=\"Acess denied\")\n if client.code != \"0\":\n raise HTTPException(status_code=401, detail=\"Auth problem\")\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n {\"phone\": client.phone, \"code\": client.code},\n expires_delta=access_token_expires,\n )\n return schemas.Token(\n access_token=access_token, refresh_token=access_token, ws_token=access_token\n )\n","repo_name":"anatoly23/boatbooking","sub_path":"src/api/customerauth.py","file_name":"customerauth.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"17156056340","text":"from __future__ import absolute_import\n\nimport numpy as np\nimport numpy.linalg as npl\n\n\ndef io_orientation(affine, tol=None):\n ''' Orientation of input axes in terms of output axes for `affine`\n\n Valid for an affine transformation from ``p`` dimensions to ``q``\n dimensions (``affine.shape == (q + 1, p + 1)``).\n\n The calculated orientations can be used to transform associated\n arrays to best match the output orientations. If ``p`` > ``q``, then\n some of the output axes should be considered dropped in this\n orientation.\n\n Parameters\n ----------\n affine : (q+1, p+1) ndarray-like\n Transformation affine from ``p`` inputs to ``q`` outputs. Usually this\n will be a shape (4,4) matrix, transforming 3 inputs to 3 outputs, but the\n code also handles the more general case\n tol : {None, float}, optional\n threshold below which SVD values of the affine are considered zero. If\n `tol` is None, and ``S`` is an array with singular values for `affine`,\n and ``eps`` is the epsilon value for datatype of ``S``, then `tol` set to\n ``S.max() * eps``.\n\n Returns\n -------\n orientations : (p, 2) ndarray\n one row per input axis, where the first value in each row is the closest\n corresponding output axis. The second value in each row is 1 if the input\n axis is in the same direction as the corresponding output axis and -1 if\n it is in the opposite direction. 
If a row is [np.nan, np.nan], which can\n happen when p > q, then this row should be considered dropped.\n '''\n affine = np.asarray(affine)\n q, p = affine.shape[0]-1, affine.shape[1]-1\n # extract the underlying rotation, zoom, shear matrix\n RZS = affine[:q, :p]\n zooms = np.sqrt(np.sum(RZS * RZS, axis=0))\n # Zooms can be zero, in which case all elements in the column are zero, and\n # we can leave them as they are\n zooms[zooms == 0] = 1\n RS = RZS / zooms\n # Transform below is polar decomposition, returning the closest\n # shearless matrix R to RS\n P, S, Qs = npl.svd(RS, full_matrices=False)\n # Threshold the singular values to determine the rank.\n if tol is None:\n tol = S.max() * np.finfo(S.dtype).eps\n keep = (S > tol)\n R = np.dot(P[:, keep], Qs[keep])\n # the matrix R is such that np.dot(R,R.T) is projection onto the\n # columns of P[:,keep] and np.dot(R.T,R) is projection onto the rows\n # of Qs[keep]. R (== np.dot(R, np.eye(p))) gives rotation of the\n # unit input vectors to output coordinates. Therefore, the row\n # index of abs max R[:,N], is the output axis changing most as input\n # axis N changes. In case there are ties, we choose the axes\n # iteratively, removing used axes from consideration as we go\n ornt = np.ones((p, 2), dtype=np.int8) * np.nan\n for in_ax in range(p):\n col = R[:, in_ax]\n if not np.alltrue(np.equal(col, 0)):\n out_ax = np.argmax(np.abs(col))\n ornt[in_ax, 0] = out_ax\n assert col[out_ax] != 0\n if col[out_ax] < 0:\n ornt[in_ax, 1] = -1\n else:\n ornt[in_ax, 1] = 1\n # remove the identified axis from further consideration, by\n # zeroing out the corresponding row in R\n R[out_ax, :] = 0\n return ornt\n","repo_name":"Raniac/NEURO-LEARN","sub_path":"env/lib/python3.6/site-packages/nipy/fixes/nibabel/orientations.py","file_name":"orientations.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"20"} +{"seq_id":"25275138254","text":"#!/usr/bin/env python3\n# License: BSD\n\nimport os\nimport pytest\nimport sys\nfrom time import sleep\napifolder = os.getcwd()\nsys.path.append(apifolder)\nfrom functions import DELETE, POST, PUT, GET\nfrom auto_config import interface\n\nReason = \"VM detected no real ATA disk\"\n\nnot_real = (\n interface == \"vtnet0\"\n or interface == \"em0\"\n or 'enp0s' in interface\n)\n\n\ndef test_05_enable_smartd_service_at_boot():\n results = GET('/service/?service=smartd')\n smartid = results.json()[0]['id']\n\n results = PUT(f'/service/id/{smartid}/', {\"enable\": True})\n assert results.status_code == 200, results.text\n\n\ndef test_06_look_smartd_service_at_boot():\n results = GET('/service/?service=smartd')\n assert results.status_code == 200, results.text\n assert results.json()[0][\"enable\"] is True, results.text\n\n\n# Read test below only on real hardware\nif not_real is False:\n def test_07_starting_smartd_service():\n payload = {\"service\": \"smartd\"}\n results = POST(\"/service/start/\", payload)\n assert results.status_code == 200, results.text\n sleep(1)\n\n def test_08_checking_to_see_if_smartd_service_is_running():\n results = GET('/service/?service=smartd')\n assert results.json()[0][\"state\"] == \"RUNNING\", results.text\n","repo_name":"truenas/middleware","sub_path":"tests/api2/test_410_smart.py","file_name":"test_410_smart.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":2144,"dataset":"github-code","pt":"20"} +{"seq_id":"37820867859","text":"import os\r\nfrom 
pickle import TRUE\r\nfrom fileinput import close\r\nimport os\r\nos.system('cls')\r\nfrom tkinter import *\r\nfrom PIL import ImageTk\r\nimport PIL.Image\r\nimport sys\r\nfrom tkinter import filedialog as fd\r\nfrom tkinter.messagebox import showinfo\r\nfrom tkinter.messagebox import askyesno\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\nfrom tkinter import ttk\r\nimport tkinter as tk\r\n\r\nos.system('cls')\r\n\r\n##################################################\r\nconn = sqlite3.connect('teams.sqlite')\r\ncur = conn.cursor()\r\n\r\ncur.execute('DROP TABLE IF EXISTS teams')\r\n\r\ncur.execute('CREATE TABLE teams (isim TEXT, değer INTEGER)')\r\n##################################################\r\nhandle=open('text3.txt',\"a\")\r\nroot=Tk()\r\nroot.title(\"Welcome to Workers app\")\r\nroot.geometry(\"800x570\")\r\nbg = PIL.Image.open(\"Rute_logo.png\")\r\nimg=bg.resize((350, 350))\r\nmy_img=ImageTk.PhotoImage(img)\r\nlabel1 = Label( root, image = my_img)\r\nlabel1.place(x = 360,y = 170)\r\n########################\r\n\r\ntext=Text(root, width=35, height=22,bg = \"light cyan\")\r\ntext.place(x=5,y=200)\r\n\r\nsifirmi=0\r\n\r\n\r\ndef sifirla():\r\n answer = askyesno(title='Onay',message='Verileri silmek istediğinen emin misin ?')\r\n if answer:\r\n handle=open('text3.txt',\"w\")\r\n text.delete('1.0', END)\r\n text.insert(END, \"isim soyisim değerlendirmesi\\n-----------------------------------\")\r\n sifirmi=1\r\nnew = tk.Button(\r\n root,\r\n text='Verileri sıfırla.',height=4, width=10,bg='red',\r\n command=sifirla\r\n)\r\nnew.place(x=10, y=80)\r\n########################\r\n\r\n\r\n\r\n\r\ntext.insert(END, \"isim soyisim değerlendirmesi\\n-----------------------------------\")\r\nf= open(\"text3.txt\")\r\n#t is a Text widget\r\ntext.insert(END, f.read())\r\n\r\n#text.insert(END,\"\\n\")\r\n\r\n\r\nttk.Label(root,text=\"Hoş Geldiniz\",font=(\"Arial Bold\", 12)).pack()\r\n\r\n\r\nttk.Label(root,text =\"isim soyisim:\",font=(\"Arial Bold\", 10)).pack()\r\nentry1=ttk.Entry(root,width=25)\r\nentry1.pack()\r\n\r\nttk.Label(root,text =\"dğerlendirmesi(10 üzerinden):\",font=(\"Arial Bold\", 10)).pack()\r\nentry2=ttk.Entry(root,width=25)\r\nentry2.pack()\r\n\r\nbu1=ttk.Button(root,text=\"Ekle\",width=25)\r\n\r\nbu1.pack()\r\nbu2=tk.Button(root,text=\"Bu kadar.\",height=4, width=10,bg='green')\r\nbu2.pack()\r\nbu2.place(x=600, y=75)\r\n#txtlist=tk.Text(root).pack()\r\n#txtlist.place(x = 180,y = 170)\r\n\r\n\r\ndef buclick():\r\n isim=entry1.get()\r\n isler=entry2.get()\r\n print(isim,isler)\r\n\r\n\r\n Text(root)\r\n handle.write(isim)\r\n handle.write(\"\\t\")\r\n handle.write(isler)\r\n handle.write(\"\\n\")\r\n entry1.delete(0,END)\r\n entry2.delete(0,END)\r\n #text.get(isim)\r\n #text.insert(entry1.get())\r\n text.insert(END, isim+\" \"+isler+\"\\n\")\r\n\r\n\r\ndef buclick2():\r\n cur.execute('DROP TABLE IF EXISTS workers')\r\n root.destroy()\r\ndef disable_event():\r\n sys.exit()\r\n\r\nroot.protocol(\"WM_DELETE_WINDOW\", disable_event) # programın x tuşu or alt+f4\r\nbu1.config(command=buclick)\r\n\r\nbu2.config(command=buclick2)\r\nroot.resizable(width=False, height=False)\r\n\r\n\r\n\r\n\r\nroot.mainloop()\r\n########################\r\n\r\n\r\n##################################################\r\nimport os\r\nfrom pickle import TRUE\r\nimport sqlite3\r\n\r\nos.system('cls')\r\n\r\n##################################################\r\nconn = sqlite3.connect('teams.sqlite')\r\ncur = conn.cursor()\r\n\r\ncur.execute('DROP TABLE IF EXISTS teams')\r\n\r\ncur.execute('CREATE TABLE 
teams (isim TEXT, değer INTEGER)')\r\n##################################################\r\n#algoritma başlangıcı\r\npuanlar=list()\r\nadlar=list()\r\n\r\nteam1=list()\r\nteam2=list()\r\nliste=dict()\r\nteam1kisileri=list()\r\nteam2kisileri=list()\r\n\r\n\r\n\r\nhandle=open('text3.txt')\r\nfor line in handle:\r\n line=line.rstrip()\r\n a=line.split('\\t')\r\n puanlar.append(int(a[1]))\r\n adlar.append(a[0])\r\n liste[a[0]]=liste.get(a[0],a[1])\r\n cur.execute(\"INSERT INTO TEAMS (isim,değer) VALUES (?,?)\",(a[0],int(a[1])))\r\n\r\n\r\n\r\ntemp3=list()\r\nfor x in puanlar:\r\n temp3.append(x)\r\nprint('Liste :',liste)\r\n\r\nortalama=sum(puanlar)/2\r\nprint('Ortalama :',ortalama)\r\nprint(adlar)\r\nprint(puanlar)\r\npuanlar.sort()\r\nprint(\"sorted:\",puanlar)\r\n\r\nbirtanedaha=0\r\nif len(puanlar)%2==1:\r\n birtanedaha=1\r\n\r\nx1=(len(puanlar))/2\r\nx1=x1-0.1\r\nprint(x1)\r\nx1=round(x1)\r\nprint(\"kişi sayısı\",x1)\r\n\r\npuanlar2=puanlar[::-1]\r\n\r\nz2=len(puanlar)\r\nz=0\r\nwhile z sum(team1):\r\n t1=0\r\n print(\"team2 sum is higher\")\r\n\r\ncc=1\r\nprint(\"temp 1 :\",temp1)\r\nif t1==0 :\r\n for x in team1:\r\n if cc==1:\r\n for y in team2:\r\n if sum(team2)-sum(team1)>10 and cc==1:\r\n if(abs(x-y))==round((d1-sum(team1))/2) or (abs(x-y))==round((d2-sum(team1))/2):\r\n if xx:\r\n temp1.append(y)\r\n temp2.append(x)\r\n temp1.remove(x)\r\n temp2.remove(y)\r\n print(\"xxxxxxxxxxxxxx\",x,y)\r\n cc=0\r\n break\r\n\r\nif len(team1)>len(team2)+1:\r\n u=abs(ortalama-sum(team2))\r\n u=u-0.1\r\n u1=round(u)\r\n u2=u1+1\r\n if u1 in team1 :\r\n team2.append(u1)\r\n team1.remove(u1)\r\n elif u2 in team1:\r\n team2.append(u2)\r\n team1.remove(u2)\r\n\r\nc=1\r\nfor x in team1:\r\n for y in team2:\r\n if(sum(temp1)>sum(temp2)) and cc==1:\r\n if(x-y)==abs(ortalama-sum(team1)) and c==1:\r\n print(temp1)\r\n temp1.append(y)\r\n temp2.append(x)\r\n temp1.remove(x)\r\n temp2.remove(y)\r\n print(x,y)\r\n c=0\r\n print(\"zzzzzzzzzzz\",temp1)\r\n break\r\nif c==1:\r\n for x in team1:\r\n for y in team2:\r\n if ((x-y)==abs(d1-sum(team1)) or (x-y)==abs(d2-sum(team1))) and c==1 and cc==1:\r\n print(temp1)\r\n temp1.append(y)\r\n temp2.append(x)\r\n temp1.remove(x)\r\n temp2.remove(y)\r\n print(x,y)\r\n c=0\r\n print(\"yyyyyyyyy\",temp1)\r\n break\r\n \r\n\"\"\" print(\"\\n\\nteam 1 :\",team1,\"toplam =\",sum(team1),team1kisileri)\r\nprint(\"team 2 :\",team2,\"toplam =\",sum(team2),team2kisileri) \"\"\"\r\n#print(\"temp 1 :\",temp1)\r\n\r\n\r\ncur.execute('DROP TABLE IF EXISTS team1')\r\ncur.execute('DROP TABLE IF EXISTS team2')\r\ncur.execute(\"CREATE TABLE team1 (isim TEXT, değer INTEGER)\")\r\ncur.execute(\"CREATE TABLE team2 (isim TEXT, değer INTEGER)\")\r\n\r\n\r\nfor x in team1:\r\n p=temp3.index(x)\r\n yapan=adlar[p]\r\n team1kisileri.append(yapan)\r\n temp3.pop(p)\r\n adlar.pop(p)\r\n\r\nfor x in team2:\r\n p=temp3.index(x)\r\n yapan=adlar[p]\r\n team2kisileri.append(yapan)\r\n temp3.pop(p)\r\n adlar.pop(p)\r\n\r\nfor x,y in zip(team1,team1kisileri):\r\n cur.execute(\"INSERT INTO team1 (isim,değer) VALUES (?,?)\",(y,int(x)))\r\nfor x,y in zip(team2,team2kisileri):\r\n cur.execute(\"INSERT INTO team2 (isim,değer) VALUES (?,?)\",(y,int(x)))\r\n\r\n\r\n\r\nconn.commit()\r\n\r\n\r\nprint(\"\\n\\nteam 1 :\",team1,\"toplam =\",sum(team1),\", kişileri:\",team1kisileri)\r\nprint(\"team 2 :\",team2,\"toplam =\",sum(team2),\", kişileri:\",team2kisileri)\r\n\r\n\r\n###############################################\r\n#proogram biitşinde tablo\r\n\r\nwin = Tk()\r\nwin.title(\"Takımların listesi\")\r\n# Set the 
size of the tkinter window\r\nwin.geometry(\"800x350\")\r\n\r\n# Create an object of Style widget\r\nstyle = ttk.Style()\r\nstyle.theme_use('winnative')#winnative#clam\r\n\r\n# Add a Treeview widget\r\ntree = ttk.Treeview(win, column=(\"Takım 1 üyeleri\", \"Takım 2 üyeleri\"), show='headings', height=len(team1))\r\ntree.column(\"# 1\", anchor=CENTER)\r\ntree.heading(\"# 1\", text=\"Takım 1 üyeleri\")\r\ntree.column(\"# 2\", anchor=CENTER)\r\ntree.heading(\"# 2\", text=\"Takım 2 üyeleri\")\r\n\r\n\r\n\r\n\r\nconn = sqlite3.connect(\"teams.sqlite\")\r\ncur = conn.cursor()\r\ncur.execute(\"SELECT isim FROM team1\")\r\nrows = cur.fetchall()\r\ncur.execute(\"SELECT isim FROM team2\")\r\nrows2 = cur.fetchall()\r\nfor row,row2 in zip(rows,rows2):\r\n tree.insert(\"\", tk.END, values=(row,row2))\r\n\"\"\" tree.insert(\"\",column=1,values=row)\r\n tree.insert(\"\",column=2,values=row2) \"\"\"\r\nconn.close()\r\n\r\ntree.pack()\r\n\r\nwin.mainloop()\r\n\r\n\r\n","repo_name":"Momec96/project-manegement-program-last-version","sub_path":"groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"38866153438","text":"import re\n\nimport sublime\nimport sublime_plugin\n\n\n# def merge_selections(sel):\n# new_selections = []\n# selection_regions = []\n# for selection in sel:\n# selection = selection.cover(view.line(selection.end()))\n# if selection_regions:\n# last = selection_regions[-1]\n# if last.contains(selection):\n# continue\n# if last.intersects(selection):\n# selection_regions[-1] = last.cover(selection)\n# else:\n# new_selections.append(selection)\n# selection_regions.append(sel)\n\n\ndef join_lines(edit, view):\n new_selections = []\n replacements = []\n num_removed_chars = 0\n for selection in view.sel():\n if replacements and replacements[-1][0].contains(selection):\n pattern = r'\\n[ \\t]*'\n new_selections.pop()\n else:\n pattern = r'[ \\t]*\\n[ \\t]*'\n curr_line = view.line(selection.end())\n next_line_indent = view.find(pattern, curr_line.begin())\n space = ' '\n if view.substr(next_line_indent.end()) == '\\n':\n space = ''\n\n if selection.empty():\n cursor_point = next_line_indent.a - num_removed_chars\n if space:\n cursor_point += 1\n new_selections.append(cursor_point)\n else:\n spaces = view.find(r'[ \\t]*', selection.end())\n selection = selection.cover(spaces)\n original = view.substr(selection)\n one_line = re.sub(r'\\s*\\n\\s*', ' ', original)\n a = selection.a - num_removed_chars\n if one_line != original:\n replacements.append((selection, one_line))\n num_removed_chars += len(original) - len(one_line)\n b = selection.b - num_removed_chars - original[-1].isspace()\n new_selections.append(sublime.Region(a, b))\n\n replacements.append((next_line_indent, space))\n num_removed_chars += next_line_indent.size() - len(space)\n\n for region, content in reversed(replacements):\n view.replace(edit, region, content)\n\n view.sel().clear()\n view.sel().add_all(new_selections)\n\n\nclass ReJoinLinesCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n join_lines(edit, self.view)\n","repo_name":"absop/ST-MyConfig","sub_path":"Packages/join_lines.py","file_name":"join_lines.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8544371124","text":"import numpy as np\nimport scipy.stats\n\nfrom .utils import get_μ, define_mu, define_us_for_V\n\nnorm = 
scipy.stats.norm()\n\n\n\ndef V0_th_P2(u):\n \"\"\"Compute the expected value of the normalised first MF v0 at threshold u for the sum of two squared Gaussian isotropic fields normalised for their standard deviations.\n\n Parameters\n ----------\n u : np.array\n Thresholds at which the first MF is computed.\n \n Returns\n -------\n v0 : np.array\n The expected value of the normalised first MF at thresholds u.\n \n \"\"\" \n return 1-((1-np.exp(-u/2.)) * (u>=0.))\n\ndef V1_th_P2(u, μ):\n \"\"\"Compute the expected value of the normalised second MF v1 at threshold u for the sum of two squared Gaussian isotropic fields normalised for their standard deviations.\n\n Parameters\n ----------\n u : np.array\n Thresholds at which the second MF is computed.\n \n μ : float\n The derivative of the covariance function at the origin for the Gaussian isotropic scalar field.\n \n Returns\n -------\n v1 : np.array\n The expected value of the normalised second MF at thresholds u.\n \n \"\"\" \n return np.sqrt(μ * (u) / 4.) * np.exp(-u/2.) * (u>=0.) * (np.sqrt(np.pi/8.))\n\ndef V2_th_P2(u, μ):\n \"\"\"Compute the expected value of the normalised third MF v2 at threshold u for the sum of two squared Gaussian isotropic fields normalised for their standard deviations.\n\n Parameters\n ----------\n u : np.array\n Thresholds at which the third MF is computed.\n \n μ : float\n The derivative of the covariance function at the origin for the Gaussian isotropic scalar field.\n \n Returns\n -------\n v2 : np.array\n The expected value of the normalised third MF at thresholds u.\n \n \"\"\" \n return (((μ * (u-1.) * np.exp(-u/2.)) / (2.*np.pi) ) * (u>=0.)) \n\n\n\n\n\nclass TheoryP2():\n \"\"\"Class to compute the expected values of Minkowski functionals (MFs) for the sum of two squared Gaussian isotropic fields normalised for their standard deviations defined on the sphere \n like the polarised intensity of the CMB ($P^2 = Q^2 + U^2$).\n\n Parameters\n ----------\n us : np.array, optional\n The thresholds at which the theoretical MFs will be computed. \n If not given, a range between 0 and 5σ with steps of 0.1σ is considered, \n with σ=1 the expected standard deviation of the fields U and Q.\n \n cls : np.array, optional\n The angular power spectrum associated to the Gaussian isotropic fields. \n Shape '(..., lmax+1)'. '...' can be 2 (EE, BB) or absent (assumed to be EE+BB).\n Default : None \n \n μ : float, optional\n The derivative of the covariance function at the origin for each of the two independent Gaussian isotropic fields (i.e., U and Q in the cosmological case).\n If both μ and cls are given, an error will be raised.\n If only cls is given, μ will be computed from input cls.\n If neither μ nor cls are given, μ is assumed to be 1.\n Default : None\n \n average_bin : bool, optional\n If True, the results of V1 and V2 are the average on each bin, to be compared with binned computations on maps.\n If False, the results are the evaluation on the center of each bin.\n The value is always exactly computed for V0, as the computation on maps does not imply binning.\n Defaul : True\n \n edges : bool, optional\n If False, the given 'us' is considered as an array of uniformly distributed thresholds. \n If True, input 'us' is considered as a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform distributions of thresholds. 
\n In this last case, the thresholds are the central value of the given bins.\n Neglected if 'us' is not given.\n Default : False.\n\n Attributes\n ----------\n us : np.array\n The thresholds at which the theoretical MFs are computed. \n \n μ : float\n The derivative of the covariance function at the origin for the sum of two squared Gaussian isotropic fields.\n \n \"\"\" \n def __init__(self, us=None, Cls=None, μ=None, average_bin=True, edges=False):\n \"\"\"Class to compute the expected values of Minkowski functionals (MFs) for a Gaussian isotropic scalar field defined on the sphere.\n\n \"\"\" \n if (us is None):\n Δu = 0.05\n self.us = np.arange(Δu/2., 5.+Δu/2., Δu)\n self.dus = Δu*np.ones(self.us.shape[0])\n else:\n us = np.array(us)\n if us.shape == (1,):\n self.us = us\n self.dus = 0.\n else:\n if edges:\n self.dus = (us[1:]-us[:-1])\n self.us = (us[1:]+us[:-1])/2.\n else:\n self.us = us\n self.dus = (us[1]-us[0])*np.ones(us.shape[0])\n\n if (Cls is not None) and (μ is None):\n if (Cls.ndim == 2) and (Cls.shape[0]==2):\n cls = (cls[0]+cls[1])/2.\n elif (Cls.ndim == 1):\n cls = cls/2.\n else:\n raise ValueError(r\"Cls dimension has to be either (2,lmax+1) or (lmax+1)\")\n \n self.μ = define_mu(Cls,μ) \n if not average_bin:\n self.dus = 0.*self.dus\n \n \n def V0(self):\n \"\"\"Compute the expected values of the normalised first MF v0 at the different thresholds us.\n\n $$\\mathbb{E}[{v_{0}(u)}] = \\exp (-u/2)$$\n \"\"\" \n return (V0_th_P2(self.us))\n\n def V1(self):\n \"\"\"Compute the expected values of the normalised second MF v1 at the different thresholds us.\n \n $$\\mathbb{E}[{v_{1}(u)}] = {\\sqrt{2\\pi } \\over 8} \\sqrt{\\mu u}\\exp (-{u \\over 2})$$\n \"\"\" \n us_ = define_us_for_V(self.us,self.dus)\n v1_ = V1_th_P2(us_, self.μ)\n \n return np.mean(v1_, axis=1)\n \n def V2(self):\n \"\"\"Compute the expected values of the normalised third MF v2 at the different thresholds us.\n \n $$\\mathbb{E}[{v_{2}(u)}] = \\mu {(u-1)\\exp (-u/2) \\over 2\\pi }$$\n \"\"\" \n us_ = define_us_for_V(self.us,self.dus)\n v2_ = V2_th_P2(us_, self.μ)\n \n return np.mean(v2_, axis=1)\n\n\n__all__ = [\"TheoryP2\"]\n\n__docformat__ = \"numpy\"\n\n","repo_name":"GraCosPA/pynkowski","sub_path":"pynkowski/theory/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"20"} +{"seq_id":"22796709270","text":"#!/usr/bin/env python3\n# module for handling ITEPKG03 file format\nfrom itepkg.exceptions import *\nfrom itepkg.entries import *\n\nclass ITEPKG:\n\n magic = b'ITEPKG03'\n header_length = 0x40\n\n def __init__(self, header, checksum=0):\n self.magic = ITEPKG.magic\n\n if isinstance(header, bytes):\n self.header = header\n else:\n self.header = bytes(header)\n\n if len(self.header) != (ITEPKG.header_length - len(self.magic)):\n raise Exception('Wrong length of header: %d' % len(self.header))\n\n self.entries = []\n self.checksum = uint32(checksum)\n\n def append(self, entry):\n self.entries.append(entry)\n\n def __bytes__(self):\n entries = b''\n for e in self.entries:\n entries += bytes(e)\n return self.magic + self.header + entries\n\n def __str__(self):\n entries = ', '.join([str(e) for e in self.entries])\n return '{magic = %s, unknown = %s, entries = [%s]}' % (self.magic,\n self.header, entries)\n\n def __repr__(self):\n raise Exception('Not implemented')\n\n def __len__(self):\n return len(bytes(self))\n\n def from_bytes(b):\n magic, b = b[:len(ITEPKG.magic)], b[len(ITEPKG.magic):]\n if magic != 
ITEPKG.magic:\n raise ParsingException('Wrong magic: %s' % magic)\n hdrlen = ITEPKG.header_length - len(ITEPKG.magic)\n header, b = b[:hdrlen], b[hdrlen:]\n obj = ITEPKG(header)\n\n entry_type = None\n while len(b) > 0 and entry_type is not Entry.End:\n entry_type, b = Entry.from_bytes(b)\n entry, b = entry_types[entry_type].from_bytes(b)\n obj.append(entry)\n\n checksum, b = uint32.from_bytes(b)\n obj.checksum = checksum\n\n return obj, b\n","repo_name":"v3l0c1r4pt0r/ittk","sub_path":"itepkg/itepkg.py","file_name":"itepkg.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"71584175411","text":"\"\"\"Config flow for ElevenLabs integration.\"\"\"\nimport logging\n\nimport voluptuous as vol\nfrom homeassistant import config_entries\nfrom homeassistant.const import CONF_API_KEY\n\nfrom .tts import CONF_VOICE\n\n_LOGGER = logging.getLogger(__name__)\n\nDATA_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_API_KEY): str,\n vol.Required(CONF_VOICE): str,\n }\n)\n\n\nclass ConfigFlow(config_entries.ConfigFlow, domain=\"elevenlabs\"):\n \"\"\"Example config flow.\"\"\"\n # The schema version of the entries that it creates\n # Home Assistant will call your migrate method if the version changes\n VERSION = 1\n CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n\n if not getattr(user_input, CONF_API_KEY, None):\n errors[CONF_API_KEY] = \"Need API key\"\n\n elif not getattr(user_input, CONF_VOICE, None):\n errors[CONF_VOICE] = \"Need to select a voice\"\n\n else:\n try:\n return self.async_create_entry(title=\"ElevenLabs\", data=user_input)\n except Exception as err:\n _LOGGER.exception(err)\n errors[\"base\"] = \"An unknown error occurred.\"\n\n # If there is no user input or there were errors, show the form again,\n # including any errors that were found with the input.\n return self.async_show_form(\n step_id=\"user\", data_schema=DATA_SCHEMA, errors=errors\n )\n","repo_name":"chdefrene/elevenlabs-tts","sub_path":"custom_components/elevenlabs/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"22459294496","text":"import pprint\nimport pytest\nimport os\n\nfrom gtmcore.fixtures import mock_labbook\nfrom gtmcore.files import FileOperations, FileOperationsException\n\n\nclass TestFileOps(object):\n def test_labbook_content_size_simply(self, mock_labbook):\n x, y, lb = mock_labbook\n\n lb_size = FileOperations.content_size(lb)\n # Make sure the new LB is about 10-30kB. 
This is about reasonable for a new, emtpy LB.\n assert lb_size > 10000\n assert lb_size < 30000\n\n\n def test_set_new_lb_section_for_large_files(self, mock_labbook):\n x, y, lb = mock_labbook\n\n assert FileOperations.is_set_untracked(labbook=lb, section='input') is False\n\n FileOperations.set_untracked(labbook=lb, section='input')\n\n assert FileOperations.is_set_untracked(labbook=lb, section='input') is True\n assert FileOperations.is_set_untracked(labbook=lb, section='code') is False\n\n # 1 - Ensure there are no untracked changes after the set operation\n s = lb.git.status()\n for key in s.keys():\n assert not s[key]\n\n # 2 - Add a file to the input directory using the old-fashioned add file op.\n with open('/tmp/unittestfile', 'w') as f:\n f.write('------------------------\\n')\n r = FileOperations.put_file(lb, section=\"input\", src_file=f.name, dst_path='')\n assert os.path.isfile(os.path.join(lb.root_dir, 'input', 'unittestfile'))\n\n # 3 - Make sure the new file exists but is not tracked (i.e., the git commit is the same)\n s = lb.git.status()\n for key in s.keys():\n assert not s[key]\n\n def test_with_the_whole_suite_of_file_operations_on_an_UNTRACKED_labbook(self, mock_labbook):\n x, y, lb = mock_labbook\n\n hash_0 = lb.git.commit_hash\n FileOperations.set_untracked(labbook=lb, section='input')\n hash_1 = lb.git.commit_hash\n assert hash_0 != hash_1\n\n with open('/tmp/unittestfile', 'wb') as f:\n f.write('àbčdęfghįjkłmñöpqrštūvwxÿż0123456789'.encode('utf-8'))\n assert not os.path.exists(os.path.join(lb.root_dir, 'input', 'unittestfile'))\n r = FileOperations.put_file(lb, section=\"input\", src_file=f.name, dst_path='')\n assert os.path.exists(os.path.join(lb.root_dir, 'input', 'unittestfile'))\n hash_2 = lb.git.commit_hash\n\n FileOperations.delete_files(lb, section='input', relative_paths=['unittestfile'])\n hash_3 = lb.git.commit_hash\n target_path = os.path.join(lb.root_dir, 'input', 'unittestfile')\n assert not os.path.exists(target_path)\n assert lb.is_repo_clean\n # Hash_2 == hash_3 because we delete a file in an UNTRACKED section\n assert hash_2 == hash_3\n\n FileOperations.makedir(lb, 'input/sample-untracked-dir/nested-dir')\n hash_4 = lb.git.commit_hash\n assert hash_3 == hash_4\n with open('/tmp/unittestfile', 'wb') as f:\n f.write('aaaaaæ'.encode('utf-8'))\n FileOperations.put_file(lb, section='input', src_file=f.name, dst_path='sample-untracked-dir/nested-dir')\n hash_5 = lb.git.commit_hash\n assert hash_4 == hash_5\n\n FileOperations.move_file(lb, section='input', src_rel_path='sample-untracked-dir/nested-dir/unittestfile', dst_rel_path='unittestfile')\n assert not os.path.exists(os.path.join(lb.root_dir, 'input', 'sample-untracked-dir/nested-dir/unittestfile'))\n assert os.path.exists(os.path.join(lb.root_dir, 'input', 'unittestfile'))\n hash_6 = lb.git.commit_hash\n assert hash_5 == hash_6\n\n FileOperations.delete_files(lb, section='input', relative_paths=['sample-untracked-dir/nested-dir'])\n hash_7 = lb.git.commit_hash\n assert hash_6 == hash_7\n\n","repo_name":"anibalsolon/gigantum-client","sub_path":"packages/gtmcore/gtmcore/files/tests/test_fileoperations.py","file_name":"test_fileoperations.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"4378050705","text":"from Chapter_3.MyQueue import *\n\n\nclass AnimalShelter:\n\n def __init__(self):\n self.dogsQueue = MyQueue()\n self.catsQueue = MyQueue()\n self.allAnimalsQueue = MyQueue()\n\n def 
enqueue(self, animalType, animalId):\n if animalType == \"dog\":\n self.dogsQueue.add(animalId)\n if animalType == \"cat\":\n self.catsQueue.add(animalId)\n\n animalInfo = {\n \"Type\": animalType,\n \"Id\": animalId\n }\n self.allAnimalsQueue.add(animalInfo)\n\n def dequeueAny(self):\n print(\"Dequeued next in line..............\")\n animalInfo = self.allAnimalsQueue.remove()\n animalType = animalInfo['Type']\n if animalType == \"dog\":\n self.dogsQueue.remove()\n if animalType == \"cat\":\n self.catsQueue.remove()\n\n def dequeueDog(self):\n print(\"Dog dequeued..............\")\n dequeuedId = self.dogsQueue.remove()\n self.removeAnimalFromGeneralQueue(dequeuedId)\n\n def dequeueCat(self):\n print(\"Cat dequeued..............\")\n dequeuedId = self.catsQueue.remove()\n self.removeAnimalFromGeneralQueue(dequeuedId)\n\n def printAllAnimals(self):\n print('')\n print(\"Current queue.............\")\n current = self.allAnimalsQueue.first\n while current is not None:\n print(current.data)\n current = current.next\n\n def removeAnimalFromGeneralQueue(self, animalId):\n if self.allAnimalsQueue.first.data[\"Id\"] == animalId:\n self.allAnimalsQueue.first = self.allAnimalsQueue.first.next\n else:\n current = self.allAnimalsQueue.first\n previous = current\n while current.data[\"Id\"] != animalId:\n previous = current\n current = current.next\n previous.next = current.next\n\n\nAnimalShelter = AnimalShelter()\nAnimalShelter.enqueue(\"dog\", 1)\nAnimalShelter.enqueue(\"dog\", 2)\nAnimalShelter.enqueue(\"cat\", 3)\nAnimalShelter.enqueue(\"cat\", 4)\n\nAnimalShelter.printAllAnimals()\n\nAnimalShelter.dequeueCat()\nAnimalShelter.printAllAnimals()\n\nAnimalShelter.dequeueAny()\nAnimalShelter.printAllAnimals()\n","repo_name":"LaloGarcia91/CrackingTheCodingInterview","sub_path":"Chapter_3/AnimalShelter.py","file_name":"AnimalShelter.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"19101007039","text":"# 20분\n# 내 처음 풀이\n# def solution(numbers, target):\n# answer = 0\n# bit_len = 2**len(numbers)\n# num_len = len(numbers)\n# for i in range(bit_len):\n# bit = bin(i)[2:].zfill(num_len)\n# tmp = 0\n# for j in range(num_len):\n# if bit[j] == '0':\n# tmp -= numbers[j]\n# elif bit[j] == '1':\n# tmp += numbers[j]\n# if tmp == target:\n# answer += 1\n# return answer\n\n# dfs 풀이\ndef solution(numbers, target):\n answer = 0\n def dfs(numbers, target, i, tmp):\n result = 0\n if i == len(numbers):\n if tmp == target:\n return 1\n else:\n return 0\n result += dfs(numbers, target, i + 1, tmp + numbers[i])\n result += dfs(numbers, target, i + 1, tmp - numbers[i])\n return result\n\n answer = dfs(numbers, target, 0, 0)\n return answer\n\n\nnumbers = [1, 1, 1, 1, 1]\ntarget = 3\n\nprint(solution(numbers, target))","repo_name":"syeeuns/algorithm","sub_path":"programmers/프로그래머스_타겟넘버.py","file_name":"프로그래머스_타겟넘버.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"12625453607","text":"import datetime\nfrom cassandra.cluster import Cluster\nfrom cassandra.query import SimpleStatement\nfrom ivetl.celery import app\nfrom ivetl.pipelines.task import Task\nfrom ivetl.models import InstitutionUsageStat, InstitutionUsageStatComposite, SubscriptionPricing, ProductBundle\nfrom ivetl.common import common\nfrom ivetl import utils\n\n\n@app.task\nclass UpdateInstitutionUsageStatsTask(Task):\n\n def run_task(self, publisher_id, 
product_id, pipeline_id, job_id, work_folder, tlogger, task_args):\n from_date = self.from_json_date(task_args.get('from_date'))\n\n if not from_date:\n from_date = datetime.datetime(2013, 1, 1)\n\n now = datetime.datetime.now()\n to_date = datetime.datetime(now.year, now.month, 1)\n\n cluster = Cluster(common.CASSANDRA_IP_LIST)\n session = cluster.connect()\n\n count = 0\n\n month_index = []\n for date in utils.month_range(from_date, to_date):\n month_index.append(date)\n\n publisher_stats_sql = \"\"\"\n select counter_type, subscriber_id, journal, usage_category, usage_date, journal_print_issn, journal_online_issn\n from impactvizor.institution_usage_stat\n where publisher_id = %s\n limit 100000000\n \"\"\"\n\n publisher_stats_statement = SimpleStatement(publisher_stats_sql, fetch_size=1000)\n\n total_count = 100000 # cheap estimate\n self.set_total_record_count(publisher_id, product_id, pipeline_id, job_id, total_count)\n\n for stat in session.execute(publisher_stats_statement, (publisher_id,)):\n # a very non-ideal way to filter out in code, \n # rather than via db query, to avoid Cassandra timeout issues\n # VIZOR-334\n if stat.counter_type != 'jr3' or stat.usage_date not in month_index:\n continue\n\n count = self.increment_record_count(publisher_id, product_id, pipeline_id, job_id, total_count, count)\n\n # get subscriptions for this pub and year\n matching_subscriptions = SubscriptionPricing.objects.filter(\n publisher_id=publisher_id,\n membership_no=stat.subscriber_id,\n year=stat.usage_date.year,\n )\n\n # find one with a matching ISSN\n match = None\n for subscription in matching_subscriptions:\n try:\n bundle = ProductBundle.objects.get(\n publisher_id=publisher_id,\n bundle_name=subscription.bundle_name,\n )\n\n issns = bundle.journal_issns\n if stat.journal_print_issn in issns or stat.journal_online_issn in issns:\n match = subscription\n break\n\n except ProductBundle.DoesNotExist:\n pass\n\n if match:\n InstitutionUsageStat.objects(\n publisher_id=publisher_id,\n counter_type='jr3',\n journal=stat.journal,\n subscriber_id=stat.subscriber_id,\n usage_date=stat.usage_date,\n usage_category=stat.usage_category,\n ).update(\n bundle_name=match.bundle_name,\n trial=match.trial,\n trial_expiration_date=match.trial_expiration_date,\n amount=match.amount,\n )\n\n # Note: we may in the future need to insert duplicate rows here if we end up supporting multiple matching bundles\n\n InstitutionUsageStatComposite.objects(\n publisher_id=publisher_id,\n counter_type='jr3',\n journal=stat.journal,\n subscriber_id=stat.subscriber_id,\n usage_date=stat.usage_date,\n usage_category=stat.usage_category,\n ).update(\n bundle_name=match.bundle_name,\n trial=match.trial,\n trial_expiration_date=match.trial_expiration_date,\n amount=match.amount,\n )\n\n self.pipeline_ended(publisher_id, product_id, pipeline_id, job_id, tlogger, show_alerts=task_args['show_alerts'])\n\n task_args['count'] = count\n return task_args\n","repo_name":"mauryavivekMPS/prod_code_clone","sub_path":"ivetl/pipelines/productbundles/tasks/update_institution_usage_stats.py","file_name":"update_institution_usage_stats.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14021426323","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 16 16:09:11 2023\r\n\r\n@author: Irving\r\n\"\"\"\r\n\r\nfrom datetime import datetime, timedelta\r\nfrom airflow import DAG\r\nfrom airflow.operators.postgres_operator import 
PostgresOperator\r\nfrom airflow.operators.python_operator import PythonOperator\r\nimport json\r\nimport pandas as pd\r\nfrom sqlalchemy import create_engine \r\n\r\n\r\n\r\ndefault_args = {\r\n 'owner': 'bankaya',\r\n 'depends_on_past': False,\r\n 'start_date': datetime(2023, 11, 17),\r\n 'email_on_failure': False,\r\n 'email_on_retry': False,\r\n 'retries': 1,\r\n 'retry_delay': timedelta(minutes=5),\r\n}\r\n\r\ndag = DAG(\r\n 'DAG_bankaya',\r\n default_args=default_args,\r\n description='Extracción, transformación y carga de datos',\r\n schedule_interval='0 0 * * *', # Ejecución diaria a las 12 am\r\n)\r\n\r\ndef extract_from_database(**kwargs):\r\n # Extrae datos de las tablas: Customer, Items, and Store\r\n # Suponiendo que las tablas se encuentran en una base de Postresql\r\n\r\n tasks = [\r\n PostgresOperator(\r\n task_id='extract_customer_data',\r\n postgres_conn_id='conn',\r\n sql=\"SELECT customer_id, customer_rfc FROM Customer\",\r\n dag=dag\r\n ),\r\n PostgresOperator(\r\n task_id='extract_items_data',\r\n postgres_conn_id='conn',\r\n sql=\"SELECT item_id, item_name FROM Items\",\r\n dag=dag\r\n ),\r\n PostgresOperator(\r\n task_id='extract_store_data',\r\n postgres_conn_id='conn',\r\n sql=\"SELECT store_id, store_name FROM Store\",\r\n \r\n dag=dag\r\n ),\r\n ]\r\n\r\n return tasks\r\n\r\ndef extract_from_json(**kwargs):\r\n # Se extrae data del archivo JSON suponiendo que se encuentra en un bucket en S3 \r\n\r\n def extract_json_data():\r\n source_bucket = 'source_bucket'\r\n source_key = 'source_key.json'\r\n\r\n with open(f\"/tmp/{source_key}\", \"r\") as json_file:\r\n json_data = json.load(json_file)\r\n return json_data\r\n\r\n extract_json_task = PythonOperator(\r\n task_id='extract_json_data',\r\n python_callable=extract_json_data,\r\n provide_context=True, \r\n dag=dag\r\n )\r\n\r\n return extract_json_task\r\n\r\n\r\n# Función de transformación y carga de datos\r\ndef transform_and_load_data(**kwargs):\r\n # Obtener el resultado de la tarea de extracción del contexto\r\n ti = kwargs['ti']\r\n json_data = ti.xcom_pull(task_ids='extract_json_data')\r\n customer_data = ti.xcom_pull(task_ids='extract_customer_data')\r\n items_data = ti.xcom_pull(task_ids='extract_items_data')\r\n store_data = ti.xcom_pull(task_ids='extract_store_data')\r\n\r\n # Convertir resultados de las tareas de extracción en DataFrames\r\n df_customer = pd.DataFrame(customer_data)\r\n df_items = pd.DataFrame(items_data)\r\n df_store = pd.DataFrame(store_data)\r\n df_json = pd.DataFrame(json_data)\r\n\r\n # Transformaciones de datos\r\n \r\n # Transformación 1: customer_rfc\r\n df_customer['customer_rfc'] = df_customer['customer_rfc'].str.upper()\r\n # Transformación 2: item_name\r\n df_items['item_name'] = df_items['item_name'].str.upper()\r\n df_items['item_name'] = df_items['item_name'].replace('[^\\w\\s]', '', regex=True)\r\n # Transformación 3: item_quantity_bought\r\n df_json['item_quantity_bought'] = df_json.groupby('item_id')['quantity'].transform('sum')\r\n # Transformación 4: store_name\r\n df_store['store_name'] = df_store['store_name'].str.upper()\r\n df_store['store_name'] = df_store['store_name'].replace('[^\\w\\s]', '', regex=True)\r\n # Transformación 5: total_bought\r\n df_json['total_bought'] = df_json.groupby('item_id')['total_price'].transform('sum') * 20 # Tasa de conversión a pesos mexicanos\r\n # Transformación 6: purchase_date\r\n df_json['purchase_date'] = 
pd.to_datetime(df_json['creation_timestamp']).dt.tz_localize('UTC').dt.tz_convert('America/Mexico_City').dt.strftime('%Y-%m-%d')\r\n\r\n # Unir DataFrames\r\n df_combined = pd.merge(df_json, df_customer, how='left', on='customer_id')\r\n df_combined = pd.merge(df_combined, df_store, how='left', on='store_id')\r\n df_combined = pd.merge(df_combined, df_items, how='left', on='item_id')\r\n \r\n # Cargar datos en Redshift\r\n # Suponiendo que la tabla final se encuentra en Redshift\r\n engine = create_engine('redshift+psycopg2://user:password@host:port/database')\r\n df_combined.to_sql('big_table', engine, index=False, if_exists='replace')\r\n\r\n# Tareas de extracción de datos\r\nextract_database_task = extract_from_database()\r\n\r\n# Tarea de extracción de datos del JSON\r\nextract_json_task = extract_from_json()\r\n\r\n# Tarea de transformación y carga de datos\r\ntransform_load_task = PythonOperator(\r\n task_id='transform_and_load_data',\r\n python_callable=transform_and_load_data,\r\n provide_context=True,\r\n dag=dag,\r\n)\r\n# Define el flujo de tareas\r\nextract_database_task >> extract_json_task >> transform_load_task # Define la dependencia entre las tareas\r\n\r\nif __name__ == \"__main__\":\r\n dag.cli()\r\n","repo_name":"Irvingr99/Bankaya","sub_path":"dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"39114838556","text":"class Solution:\n def countSubarrays(self, nums, k):\n ps, res, l, cs = [0]*len(nums), 0, 0, 0\n for i,v in enumerate(nums):\n cs += v\n ps[i] = cs\n while l <= i:\n ss, sl = (cs-ps[l-1] if l > 0 else cs), i-l+1\n #print('i', i, 'l', l, 'score', sl*ss, 'k', k, 'length', sl)\n if sl*ss < k: \n res += sl\n break\n else: l+=1\n return res\n","repo_name":"shankarkrishnamurthy/problem-solving","sub_path":"count-subarrays-with-score-less-than-k.py","file_name":"count-subarrays-with-score-less-than-k.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"34303690165","text":"#coding=gbk\n\nimport socket # 导入 socket 模块\nimport struct\nimport numpy as np\nfrom hmmlearn import hmm\n\nimport PersonData_class as pd\nimport Classifier as cl\n\nHMM=None #全局HMM模型\ndataList=None #全局feature矩阵\ncache=b''\n\ndef send(data_type=0,data=\"\"):\n #向终端发送data_type类型的数据,data为字符串\n global Socket\n #data = bytes(data, encoding=\"utf8\")\n data = bytes(data.encode('utf-8'))\n data = struct.pack('i',len(data))+struct.pack('i',data_type)+data\n Socket.send(data)\n\ndef parse():\n global HMM\n global dataList\n global cache\n cache_len=len(cache)\n if(cache_len<4):\n return False\n size=struct.unpack('i',cache[0:4])[0]\n if(cache_len<8+size):\n return False\n data_type=struct.unpack('i',cache[4:8])[0]\n data=cache[8:(8+size)]\n cache=cache[(8+size):]\n if(data_type==0):\n dataList=[]\n elif(data_type==1):\n result=struct.unpack(''.join(['f' for i in range(int(len(data)/4))]),data)\n #print(\"receive data len:\"+str(len(list(result))))\n dataList.append(result)\n elif(data_type==2):\n #print(dataList)\n send(3,HMM.predict(dataList))\n else:\n print(\"parse Error! 
\",size,data_type,data)\n return True\n\ndef recv(data):\n global cache\n cache+=data\n while(parse()):\n pass\n \ndef start():\n #读取训练数据并训练HMM\n s = pd.ActionDataSet()\n #添加数据到训练集只需反复s.LoadDataFromFile('文件名')即可\n s.LoadDataFromFile('train.txt')\n global HMM\n HMM = cl.Classifier()\n HMM.addDataFromADS(s)\n HMM.fit()\n\nif __name__ =='__main__':\n \n print(\"Begin Build HMM.\")\n start()\n \n while (True):\n s = socket.socket() # 创建 socket 对象\n host = \"\" # 获取本地主机名\n port = 29791 # 设置端口\n s.bind((host, port)) # 绑定端口\n s.listen(1) # 等待客户端连接\n\n print(\"End Build HMM. Listen(29791) now.\")\n Socket, addr = s.accept() # 建立客户端连接。\n print('Accept:', addr)\n while(True):\n data = Socket.recv(1024)\n if not data:\n break\n recv(data)\n","repo_name":"zsyzgu/ViveHMM","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"29163137828","text":"import json\nimport os\nimport csv\nimport datetime\nimport random\n\nimport asyncio\nfrom enum import Enum\nimport urllib.parse as urlparse\nfrom urllib.parse import parse_qs\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Travian:\n class PageUrl(Enum):\n login = 'login.php'\n dorf = 'dorf1.php'\n bid = 'hero.php?t=4'\n\n AutoBidList = {'Cage': 160, 'Ointment': 36, 'Small Bandage': 18}\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n self.session = requests.Session()\n self.session.headers.update(header)\n self.travian = 'https://tse.asia.travian.com'\n self.bid_csv_header = ('time', 'amount', 'name', 'bids', 'silver', 'silver_unit', 'created_at')\n\n async def login(self):\n print(\"開始登入...\")\n res = self.session.get(f'{self.travian}/{self.PageUrl.login.value}')\n soup = BeautifulSoup(res.text, 'html.parser')\n login_num = soup.find('input', attrs={'name': 'login'}).attrs['value']\n post_data = {'name': self.username, 'password': self.password, 's1': 'Login',\n 'w': '1440:900', 'login': login_num}\n self.session.post(f'{self.travian}/{self.PageUrl.login.value}', data=post_data)\n await asyncio.sleep(5)\n self.session.get(f'{self.travian}/{self.PageUrl.dorf.value}')\n\n async def go_bid_page(self) -> BeautifulSoup:\n print(\"跳轉至出價頁面...\")\n res = self.session.get(f'{self.travian}/{self.PageUrl.bid.value}')\n return BeautifulSoup(res.text, 'html.parser')\n\n async def fetch_bid_price(self):\n print(\"獲取出價價格...\")\n soup = await self.get_page(f'{self.travian}/{self.PageUrl.bid.value}&reload=auto')\n bid_table = soup.find('table')\n bid_tbody = bid_table.find('tbody')\n bid_trs = bid_tbody.find_all('tr')\n bid_result = []\n for bid_tr in bid_trs:\n amount, name = bid_tr.find(class_='name').text.strip().split('\\u202c×\\u202c')\n amount = int(amount.replace('\\u202d\\u202d', '').strip())\n name = name.strip()\n bids = int(bid_tr.find(class_='bids').text.strip())\n silver = int(bid_tr.find(class_='silver').text.strip())\n silver_unit = silver / amount\n time = int(bid_tr.find(class_='timer').attrs['value'])\n created_at = str(datetime.datetime.now()).split('.')[0]\n bid_a = bid_tr.find('a', class_='bidButton')\n bid_url = None\n bid_button = None\n if bid_a is not None:\n bid_url = f'{self.travian}{bid_a.attrs[\"href\"]}'\n bid_button = bid_a.text\n bid_result.append({'time': time, 'amount': amount, 'name': 
name,\n 'bids': bids, 'silver': silver, 'silver_unit': silver_unit,\n 'created_at': created_at, 'bid_url': bid_url, 'bid_button': bid_button})\n return bid_result\n\n async def fetch_farm_page(self):\n print(\"獲取農場名單\")\n\n async def get_page(self, url) -> BeautifulSoup:\n res = self.session.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n while soup.find('div', class_='innerLoginBox') is not None:\n print(\"嘗試重新登入...\")\n await self.login()\n await asyncio.sleep(3)\n res = self.session.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n return soup\n\n async def auto_bid(self, bids):\n print('判斷是否自動出價,讀取出價列表...')\n auto_bid_list = self.AutoBidList\n if os.path.exists('bid.json'):\n with open('bid.json') as f:\n auto_bid_list = json.load(f)\n for bid in bids:\n if bid['name'] in auto_bid_list:\n name = bid['name']\n silver_unit = bid['silver_unit']\n max_silver_unit = auto_bid_list[name]\n amount = bid['amount']\n bid_silver = max_silver_unit * amount\n bid_url = bid['bid_url']\n bid_button = bid['bid_button']\n time = bid['time']\n else:\n continue\n\n if silver_unit < max_silver_unit and bid_button == 'bid' and bid_url and time <= 600:\n print(f\"開始出價: {bid['name']} 嘗試價格為: {bid_silver}...\")\n soup = await self.get_page(bid['bid_url'])\n submit_bit = soup.find('div', class_='submitBid')\n if submit_bit:\n try:\n bid_as = soup.find_all('a', class_='bidButton')\n z = None\n for bid_a in bid_as:\n z_href = bid_a.attrs['href']\n if parse_qs(urlparse.urlparse(z_href).query).get('z') is not None:\n z = parse_qs(urlparse.urlparse(z_href).query)['z'][0]\n break\n if z is not None:\n a = parse_qs(urlparse.urlparse(bid_url).query)['a'][0]\n post_data = {'page': 1, 'filter': '', 'action': 'but', 'z': z, 'a': a,\n 'maxBid': bid_silver}\n self.session.post(f'{self.travian}/hero.php?t=4', data=post_data)\n else:\n print(\"找不到 z 參數\")\n except Exception as e:\n print(f\"出價失敗: {e}\")\n else:\n print(\"找不到出價提交\")\n\n async def save_bid_to_csv(self, auto_bid=False):\n print(\"紀錄出價啟動...\")\n if not os.path.exists('bid.csv'):\n print('找不到bid.csv,建立新檔案')\n with open('bid.csv', 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.bid_csv_header)\n writer.writeheader()\n while True:\n bid_result = await self.fetch_bid_price()\n _auto_bid_task = None\n if auto_bid:\n _auto_bid_task = asyncio.create_task(self.auto_bid(bid_result))\n min_time = 0\n with open('bid.csv', 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.bid_csv_header)\n for bid in bid_result:\n if bid['time'] <= 30:\n print(f\"紀錄資料:{bid}\")\n writer.writerow({k: v for k, v in bid.items() if k in self.bid_csv_header})\n else:\n min_time = bid['time'] - 15\n break\n min_time = max(min_time, 15)\n wait_time = min(random.randint(120, 300), min_time)\n print(f'waiting... 
{wait_time} seconds')\n await asyncio.sleep(wait_time)\n if _auto_bid_task:\n await _auto_bid_task\n\n async def main(self):\n await asyncio.gather(self.save_bid_to_csv(auto_bid=True))\n\n\nif __name__ == \"__main__\":\n t = Travian(username=os.environ['T_USER'], password=os.environ['T_PASS'])\n asyncio.run(t.save_bid_to_csv(auto_bid=True))\n","repo_name":"yenchenLiu/travian","sub_path":"travian.py","file_name":"travian.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"19960058624","text":"\"\"\"\n 实现了的项目需求:\n 1、\n 从命令行参数给出:论文原文的文件的绝对路径。\n 从命令行参数给出:抄袭版论文的文件的绝对路径。\n 从命令行参数给出:输出的答案文件的绝对路径。\n 2、\n genhash算法\n 3、\n 对中文文本查重实现\n ————2020.9.22~2020.9.23\n\"\"\"\nimport re\nimport sys\nimport jieba\nfrom gensim import corpora, models, similarities\nfrom collections import defaultdict\nfrom gensim.similarities import Similarity\n\nif __name__ == '__main__':\n\n #命令行参数传入:main.py、[论文原文的文件的绝对路径]、[抄袭版论文的文件的绝对路径]、[输出的答案文件的绝对路径]\n text1_abs_path = sys.argv[1]\n text2_abs_path = sys.argv[2]\n save_abs_path = sys.argv[3]\n\n #过滤非中文\n filter = re.compile(u\"[^a-zA-Z0-9\\u4e00-\\u9fa5]\")\n\n #文件操作\n with open(text1_abs_path,'r',encoding='UTF-8') as f1,open(text2_abs_path,'r',encoding='UTF-8') as f2:\n\n #读取文件内容\n doc1=f1.read()\n doc2=f2.read()\n\n #对文本中的非中文内容等进行删去\n doc1 = filter.sub('',doc1)\n doc2 = filter.sub('',doc2)\n # 将文本载入一个列表,待分词\n\n#将文本1 jieba 分词,转向量\ndoc1_list = [jieba.lcut(doc1)]\ndic = corpora.Dictionary(doc1_list)\nnum_fea = len(dic.token2id)\ncorpus = [dic.doc2bow(doc1) for doc1 in doc1_list]\ndoc2_vec = dic.doc2bow(jieba.lcut(doc2))\n\n#计算相似度\nsimilarity = Similarity('-Similarity-index', corpus, num_fea)\ncalculation = similarity[doc2_vec]#\nprint(\"两篇文章的相似度约为:%.5s\" %str(calculation[0]*100) +\"%\")\n\n# 输出结果写入指定文档\nwith open(save_abs_path,'w',encoding='UTF-8') as f3:\n\n f3.write(\"两个文本的相似度:%s\" %calculation[0]*100 +\"\")\n f3.close()\n\n","repo_name":"4672513gg/3118005358","sub_path":"3118005358/第一版本/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"16888934133","text":"#coding:utf-8\nimport xadmin\nfrom xadmin import views\nfrom models import *\n\nfrom DjangoUeditor.models import UEditorField\nfrom DjangoUeditor.widgets import UEditorWidget\nfrom xadmin.views import BaseAdminPlugin, ModelFormAdminView, DetailAdminView\nfrom django.conf import settings\nfrom django.db.models import TextField\n\nclass GlobalSetting(object):\n\t#设置base_site.html的Title\n\tsite_title = '场外市场评选活动后台管理系统'\n\n\tdef get_site_menu(self):\n\n\t\treturn (\n {'title': '清大益讯','menus':(\n {'title': '评委管理', 'icon': 'fa fa-user', 'url': self.get_model_url(JUDGE, 'changelist')},\n {'title': '行业分类', 'icon': 'fa fa-file', 'url': self.get_model_url(INDUSTRY, 'changelist')},\n {'title': '参评企业', 'icon': 'fa fa-bank', 'url': self.get_model_url(COMPANY, 'changelist')},\n {'title': '投票记录', 'icon': 'fa fa-inbox', 'url': self.get_model_url(VOTEPHONE, 'changelist')},\n )},\n )\n\n\n\nclass JUDGEAdmin(object):\n\tlist_display = ['name','company','position','sort']\n\tsearch_fields = ['name']\n\tlist_editable = ['name', 'company', 'position','sort']\n\nclass INDUSTRYAdmin(object):\n\tlist_display = ['name','sort']\n\tsearch_fields = ['name']\n\tlist_editable = ['name','sort']\n\nclass COMPANYAdmin(object):\n\tlist_display = 
['name','code','industry','vote_type','vote_status','video','vote','sort']\n\tsearch_fields = ['name']\n\tlist_editable = ['name','industry','vote_type','vote_status','video','sort']\n\tlist_filter=['industry','vote_type','vote_status']\n\tstyle_fields = {'detail':'ueditor'}\n\tlist_per_page = 20\n\tordering = ['sort']\n\nclass RANDOMCODEAdmin(object):\n\tlist_display = ['tel','code','time','count']\n\nclass VOTEPHONEAdmin(object):\n\tlist_display = ['tel','time','vote_type','company']\n\n\nclass XadminUEditorWidget(UEditorWidget):\n\tdef __init__(self,**kwargs):\n\t\tself.ueditor_options=kwargs\n\t\tself.Media.js = None\n\t\tsuper(XadminUEditorWidget,self).__init__(kwargs)\n\nclass UeditorPlugin(BaseAdminPlugin):\n\tdef get_field_style(self, attrs, db_field, style, **kwargs):\n\t\tif style == 'ueditor':\n\t\t\tif isinstance(db_field, UEditorField):\n\t\t\t\twidget = db_field.formfield().widget\n\t\t\t\tparam = {}\n\t\t\t\tparam.update(widget.ueditor_settings)\n\t\t\t\tparam.update(widget.attrs)\n\t\t\t\treturn {'widget': XadminUEditorWidget(**param)}\n\t\t\tif isinstance(db_field, TextField):\n\t\t\t\treturn {'widget': XadminUEditorWidget}\n\t\treturn attrs\n\tdef block_extrahead(self, context, nodes):\n\t\tjs = '' % (settings.STATIC_URL + \"ueditor/ueditor.config.js\")\n\t\tjs += '' % (settings.STATIC_URL + \"ueditor/ueditor.all.min.js\")\n\t\tnodes.append(js)\n\n\nxadmin.site.register(JUDGE,JUDGEAdmin)\nxadmin.site.register(INDUSTRY,INDUSTRYAdmin)\nxadmin.site.register(COMPANY,COMPANYAdmin)\n#xadmin.site.register(RANDOMCODE,RANDOMCODEAdmin)\nxadmin.site.register(VOTEPHONE,VOTEPHONEAdmin)\nxadmin.site.register(views.CommAdminView, GlobalSetting)\nxadmin.site.register_plugin(UeditorPlugin,DetailAdminView)\nxadmin.site.register_plugin(UeditorPlugin,ModelFormAdminView)","repo_name":"congyuandong/Vote","sub_path":"qdvote/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"27234579808","text":"#!/usr/local/bin/python3\n\nimport base64\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.serialization import load_pem_public_key\nfrom cryptography.hazmat.primitives.asymmetric import padding\nimport os\n\ndef hash_msg(message):\n digest = hashes.Hash(hashes.SHA256())\n digest.update(message)\n return digest.finalize()\n\ndef sign_msg(message, key):\n return key.sign(\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256() \n )\n\ndef load_privateKey(path):\n with open(path, 'rb') as f:\n pem_data = f.read()\n return serialization.load_pem_private_key(pem_data, password=b\"passphrase\")\n\ndef load_cert(path):\n with open(path, 'rb') as f:\n pem_data = f.read()\n return x509.load_pem_x509_certificate(pem_data, default_backend())\n\ndef encrypt_msg(message, certificat):\n publicKey = certificat.public_key()\n\n return publicKey.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\ndef save_to_file(contenu, message, filename):\n with open(os.open(filename, os.O_CREAT | os.O_WRONLY, 0o1600), 'wb+') as msg_obj:\n msg_obj.write(message)\n msg_obj.close()\n print(\"[+] \" + contenu + 
\" stocké dans le fichier: \" + filename)\n\n## input\nmessage = input(\"Votre message à chiffre: \")\ninputCert = input(\"Indiquez le certificat du destinataire: \")\ninputPrivateKey = input(\"Indiquez votre clé privée: \")\ninputCiphertextFilename = input(\"Nom de fichier du message chiffré: \")\ninputSignatureFilename = input(\"Nom de fichier de la signature: \")\n\n## initialisation\nmessage64 = str.encode(message)\ncertC2 = load_cert(inputCert + \".pem\")\nprivateKey = load_privateKey(inputPrivateKey + \".pem\")\n\n## chiffrement msg\nciphertext = encrypt_msg(message64, certC2)\nciphertext64 = base64.b64encode(ciphertext)\n\n## sauvegarde message chiffré\nsave_to_file(\"message chiffré\", ciphertext64, inputCiphertextFilename)\n\n## signature empreinte msg\ndigest = hash_msg(message64) # empreinte message\ndigest64 = base64.b64encode(digest)\nsignMsg = sign_msg(digest, privateKey)\nsignMsg64 = base64.b64encode(signMsg)\n\n## sauvegarde empreinte signé\nsave_to_file(\"empreinte signée\", signMsg64, inputSignatureFilename)\n\n","repo_name":"Meulon/RT0802","sub_path":"sendMsg.py","file_name":"sendMsg.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7614825398","text":"\"\"\"\n使用栈\n1. len(str) 然后开始循环整个字符串\n2. [ ( { 入栈 ] ) } 出栈 出栈时比较是否对应 否则返回false\n3. 循环完整个字符串之后 栈必须为空 否则false\n\n空字符串可被认为是有效字符串,所以第一步判空\n\"\"\"\n\n\nclass Solution:\n def isValid(self, s: str) -> bool:\n if len(s) == 0:\n return True\n d = {'}': '{', ']': '[', ')': '('}\n l = []\n length = len(s)\n i = 0\n j = 0 # 栈指针\n\n while (i < length):\n if s[i] in ['{', '[', '(']: # 入栈\n l.append(s[i])\n j += 1\n elif s[i] in ['}', ']', ')']: # 出栈\n if len(l) and l[j-1] == d.get(s[i]):\n l.pop()\n j -= 1\n else:\n return False\n\n else:\n return False\n\n i += 1\n\n if len(l)==0: # 最后不为空 则出错\n return True\n else:\n return False\n\ndef main():\n result = Solution().isValid(\"{[]}\")\n print(result)\n\n\nif __name__ == '__main__':\n main()\n\n\n\"\"\"\n附上大佬的代码\nclass Solution:\n def isValid(self, s):\n while '{}' in s or '()' in s or '[]' in s:\n s = s.replace('{}', '')\n s = s.replace('[]', '')\n s = s.replace('()', '')\n return s == ''\n\"\"\"\n","repo_name":"Timelomo/leetcode","sub_path":"20 Valid Parentheses.py","file_name":"20 Valid Parentheses.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"34085886151","text":"import re\r\nstring = 'TCAATGCATGCGGGTCTATATGCAT'\r\n\r\n\r\ndef reverse_complementation(something):\r\n complement = ''\r\n for a in something:\r\n if a == 'A':\r\n complement += 'T'\r\n if a == 'T':\r\n complement += 'A'\r\n if a == 'C':\r\n complement += 'G'\r\n if a == 'G':\r\n complement += 'C'\r\n return complement[::-1] # return complement in reverse\r\n\r\n# Completely substring string, sort by length, store in list\r\ntemp = []\r\nfor i in range(len(string)):\r\n for j in range(i+1, len(string)):\r\n temp.append((string[i:j+1]))\r\nsubstring_list = (sorted(temp, key=len))\r\n\r\n\r\n# Collect those substrings of the correct length in list\r\nreal_substrings = []\r\nfor b in substring_list:\r\n if 4 <= len(b) <= 12:\r\n real_substrings.append(b)\r\n\r\n# Take the usable substrings and get their reverse complements\r\nreversals = []\r\nfor z in real_substrings:\r\n reversals.append(reverse_complementation(z))\r\n\r\n\r\n# If a substring equals its reverse complement, its a restriction site. 
Collect those substrings.\r\nrestriction_sites = []\r\nfor i in range(len(real_substrings)):\r\n if real_substrings[i] == reversals[i]:\r\n restriction_sites.append(real_substrings[i])\r\n\r\n\r\ndef output_maker():\r\n for x in restriction_sites:\r\n for m in re.finditer(x, string):\r\n print(m.start()+1, len(x))\r\n\r\n\r\noutput_maker()\r\n\r\n# I used an online duplicate line remover to clean the data, sorry!","repo_name":"SidG13/miscellaneous_code","sub_path":"Rosalind_Problems/REVP.py","file_name":"REVP.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73700580208","text":"import sys\r\nfrom itertools import combinations\r\nfrom collections import deque\r\n\r\nsys.setrecursionlimit(1000000)\r\nN, M, D = map(int, sys.stdin.readline().split())\r\nlocations = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\r\ndelta = ((0, -1), (-1, 0), (0, 1))\r\nresult = 0\r\nenemy = sum([locations[i].count(1) for i in range(N)])\r\n\r\n\r\ndef go():\r\n last_row = locations[-1][:]\r\n num = last_row.count(1)\r\n # print(\"num\", num)\r\n for i in range(N-2, -1, -1):\r\n locations[i+1] = locations[i][:]\r\n locations[0] = [0 for _ in range(M)]\r\n\r\n for i in range(N):\r\n for j in range(M):\r\n if locations[i][j] < 0:\r\n locations[i][j] = 0\r\n\r\n return [last_row, num]\r\n\r\ndef back(last_row):\r\n for i in range(0, N-1):\r\n locations[i] = locations[i+1][:]\r\n locations[-1] = last_row\r\n # print(\"복구\", locations)\r\n return\r\n\r\ndef attack(idx, points, total, enemies):\r\n global locations\r\n # print(\"attack\", enemies)\r\n # print(idx, points, total)\r\n # for location in locations:\r\n # print(location)\r\n # print(locations)\r\n if idx >= len(points) or enemies <= 0:\r\n if enemies <= 0:\r\n global result\r\n result = max(result, total)\r\n # print(\"result\", result, \"total\", total)\r\n else:\r\n # print(\"---------------------------\")\r\n # print(locations)\r\n # print(\"1111111111111\")\r\n last_row, num = go()\r\n # print(locations, last_row)\r\n attack(0, points, total, enemies-num)\r\n # print(\"2222222222222\")\r\n back(last_row)\r\n # print(locations)\r\n # print(\"---------------------------\")\r\n return\r\n\r\n visited = [[False for j in range(M)] for i in range(N)]\r\n queue = deque()\r\n queue.append((N, points[idx]))\r\n\r\n while queue:\r\n x, y = queue.popleft()\r\n # print(x, y, abs(x-N) + abs(y-points[idx]))\r\n\r\n if abs(x-N) + abs(y-points[idx]) >= D:\r\n continue\r\n\r\n for dx, dy in delta:\r\n xx, yy = x+dx, y+dy\r\n # print(\"xx\", xx, \"yy\", yy)\r\n if 0 <= xx < N and 0 <= yy < M and not visited[xx][yy]:\r\n # print(\">>>>>\")\r\n if abs(locations[xx][yy]) == 1:\r\n if locations[xx][yy] < 0:\r\n attack(idx+1, points, total, enemies)\r\n else:\r\n locations[xx][yy] = -1\r\n attack(idx+1, points, total+1, enemies-1)\r\n locations[xx][yy] = 1\r\n return\r\n\r\n visited[xx][yy] = True\r\n queue.append((xx, yy))\r\n\r\n attack(idx+1, points, total, enemies)\r\n return\r\n\r\n\r\nfor points in combinations(range(M), 3):\r\n # print(\"start\", enemy)\r\n attack(0, points, 0, enemy)\r\n\r\nprint(result)","repo_name":"Jeeyoun-S/Problem-Solving","sub_path":"백준/Gold/17135. 
캐슬 디펜스/캐슬 디펜스.py","file_name":"캐슬 디펜스.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73380949488","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import SensorData, ThresholdConditions\nfrom .serializers import SensorDataSerializer\nfrom .models import ThresholdConditions\nfrom .serializers import ThresholdConditionsSerializer\n\n# class ReceiveSensorData(APIView):\n# #make one check api jo contract se call ho\n# def post(self, request, format=None):\n# serializer = SensorDataSerializer(data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n\n# # Fetch the shipment_id from the request data\n# shipment_id = serializer.validated_data.get('shipment_id')\n\n# # Fetch the threshold conditions from the database based on the shipment_id\n# try:\n# conditions = ThresholdConditions.objects.get(shipment_id=shipment_id)\n# except ThresholdConditions.DoesNotExist:\n# conditions = None\n\n# if conditions:\n# temperature = serializer.validated_data['temperature']\n# humidity = serializer.validated_data['humidity']\n\n# # Check the sensor data against the threshold conditions\n# temperature_upper_limit = conditions.temperature_upper_limit\n# temperature_lower_limit = conditions.temperature_lower_limit\n# humidity_upper_limit = conditions.humidity_upper_limit\n# humidity_lower_limit = conditions.humidity_lower_limit\n\n# if (\n# temperature >= temperature_lower_limit and\n# temperature <= temperature_upper_limit and\n# humidity >= humidity_lower_limit and\n# humidity <= humidity_upper_limit\n# ):\n# condition = 'Good'\n# else:\n# condition = 'Bad'\n# else:\n# condition = 'Threshold conditions not found for the specified shipment_id.'\n\n# return Response({'condition': condition}, status=status.HTTP_201_CREATED)\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass ReceiveSensorData(APIView):\n def post(self, request, format=None):\n serializer = SensorDataSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass CheckCondition(APIView):\n # def post(self, request, format=None):\n def get(self, request, format=None):\n # shipment_id = request.data.get('shipment_id')\n\n shipment_id = request.GET.get('shipment_id')\n\n # Fetch the threshold conditions for the specified shipment_id\n try:\n conditions = ThresholdConditions.objects.get(shipment_id=shipment_id)\n except ThresholdConditions.DoesNotExist:\n return Response({'condition_ok': False, 'message': 'Threshold conditions not found for the specified shipment_id.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Fetch all SensorData instances for the specified shipment_id\n sensor_data_instances = SensorData.objects.filter(shipment_id=shipment_id)\n\n if not sensor_data_instances:\n return Response({'condition_ok': False, 'message': 'No SensorData found for the specified shipment_id.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Check if all instances are within the threshold\n for instance in sensor_data_instances:\n temperature = instance.temperature\n humidity = instance.humidity\n\n temperature_upper_limit = conditions.temperature_upper_limit\n temperature_lower_limit = conditions.temperature_lower_limit\n humidity_upper_limit = 
conditions.humidity_upper_limit\n humidity_lower_limit = conditions.humidity_lower_limit\n\n if (\n temperature < temperature_lower_limit or\n temperature > temperature_upper_limit or\n humidity < humidity_lower_limit or\n humidity > humidity_upper_limit\n ):\n return Response({'condition_ok': False, 'message': 'Threshold crossed for the specified shipment_id.'}, status=status.HTTP_200_OK)\n\n # If all instances are within the threshold\n return Response({'condition_ok': True}, status=status.HTTP_200_OK)\n\nclass SetThresholdConditions(APIView):\n def post(self, request, format=None):\n serializer = ThresholdConditionsSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass CheckKar(APIView):\n def get(self, request, format=None): \n return Response({'condition_ok': True, 'message': 'Within the Threshold'}, status=status.HTTP_200_OK)","repo_name":"SaranshBaniyal/logix","sub_path":"backend/shipping/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72761046140","text":"plt.figure(figsize=(14, 4.5))\n\ndef oversight_L(X):\n return abs(f(X)-L(X))\n\nplt.subplot(1, 2, 1)\nplt.grid()\n\nX_rez = np.arange(-np.pi/3, np.pi/3+0.01, 0.01)\nY_rez = [oversight_L(x_i) for x_i in X_rez]\n\nplt.plot(X_rez, Y_rez, color = \"blue\")\nplt.subplot(1, 2, 2)\nplt.grid()\n\nfor i in range(10):\n X_rez = np.arange(x[i], x[i+1]+0.01, 0.01)\n Y_rez = [abs(f(x_i) - S(a,b,c,d,h,i,x_i)) for x_i in X_rez]\n plt.plot(X_rez, Y_rez, color = \"red\")\n","repo_name":"arroneq/KPI-IPT-subjects","sub_path":"Numerical Methods/Lab 5/Code/Error.py","file_name":"Error.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"35650858243","text":"\n# coding: utf-8\n\n# *CAVEAT: Sorry but just note this notebook can be a bit slow to load probably due to the Plotly embeddings displaying a large number of points*\n# \n# #Introduction\n# \n# There already exists a plethora of notebooks discussing the merits of dimensionality reduction methods, in particular the Big 3 of PCA (Principal Component Analysis), LDA ( Linear Discriminant Analysis) and TSNE ( T-Distributed Stochastic Neighbour Embedding). Quite a handful of these have compared one to the other but few have gathered all 3 in one go. Therefore this notebook will aim to provide an introductory exposition on these 3 methods as well as to portray their visualisations interactively and hopefully more intuitively via the Plotly visualisation library. The chapters are structuredas follows:\n# \n# 1. **Principal Component Analysis ( PCA )** - Unsupervised, linear method\n# \n# \n# 2. **Linear Discriminant Analysis (LDA)** - Supervised, linear method\n# \n# \n# 3. **t-distributed Stochastic Neighbour Embedding (t-SNE)** - Nonlinear, probabilistic method\n# \n# Lets go.\n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\nimport plotly.tools as tls\nimport seaborn as sns\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport matplotlib\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# Import the 3 dimensionality reduction methods\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n\n\n# **Curse of Dimensionality & Dimensionality Reduction**\n# \n# The term \"Curse of Dimensionality\" has been oft been thrown about, especially when PCA, LDA and TSNE is thrown into the mix. This phrase refers to how our perfectly good and reliable Machine Learning methods may suddenly perform badly when we are dealing in a very high-dimensional space. But what exactly do all these 3 acronyms do? They are essentially transformation methods used for dimensionality reduction. Therefore, if we are able to project our data from a higher-dimensional space to a lower one while keeping most of the relevant information, that would make life a lot easier for our learning methods.\n\n# # MNIST Dataset\n# \n# For the purposes of this interactive guide, the MNIST (Mixed National Institute of Standards and Technology) computer vision digit dataset was chosen partly due to its simplicity and also surprisingly deep and informative research that can be done with the dataset. So let's load the training data and see what we have\n\n# In[ ]:\n\n\ntrain = pd.read_csv('../input/train.csv')\ntrain.head()\n\n\n# In[ ]:\n\n\nprint(train.shape)\n\n\n# The MNIST set consists of 42,000 rows and 785 columns. There are 28 x 28 pixel images of digits ( contributing to 784 columns) as well as one extra label column which is essentially a class label to state whether the row-wise contribution to each digit gives a 1 or a 9. Each row component contains a value between one and zero and this describes the intensity of each pixel. \n\n# **Pearson Correlation Plot**\n# \n# Since we are still having the problem that our dataset consists of a relatively large number of features (columns), it is perfect time to introduce Dimensionality Reduction methods. Before we start off, let's conduct some cleaning of the train data by saving the label feature and then removing it from the dataframe\n\n# In[ ]:\n\n\n# save the labels to a Pandas series target\ntarget = train['label']\n# Drop the label feature\ntrain = train.drop(\"label\",axis=1)\n\n\n# # 1. Principal Component Analysis (PCA)\n# \n# In a nutshell, PCA is a linear transformation algorithm that seeks to project the original features of our data onto a smaller set of features ( or subspace ) while still retaining most of the information. To do this the algorithm tries to find the most appropriate directions/angles ( which are the principal components ) that maximise the variance in the new subspace. Why maximise the variance though? \n# \n# To answer the question, more context has to be given about the PCA method. One has to understand that the principal components are orthogonal to each other ( think right angle ). As such when generating the covariance matrix ( measure of how related 2 variables are to each other ) in our new subspace, the off-diagonal values of the covariance matrix will be zero and only the diagonals ( or eigenvalues) will be non-zero. 
It is these diagonal values that represent the *variances* of the principal components that we are talking about or information about the variability of our features. \n# \n# Therefore when PCA seeks to maximise this variance, the method is trying to find directions ( principal components ) that contain the largest spread/subset of data points or information ( variance ) relative to all the data points present. For a brilliant and detailed description on this, check out this stackexchange thread: \n# \n# [PCA and proportion of variance explained][1] by amoeba\n# \n# [1]: http://stats.stackexchange.com/a/140579/3277\n\n# ### Calculating the Eigenvectors\n# \n# Now it may be informative to observe how the variances look like for the digits in the MNIST dataset. Therefore to achieve this, let us calculate the eigenvectors and eigenvalues of the covarience matrix as follows:\n\n# In[ ]:\n\n\n# Standardize the data\nfrom sklearn.preprocessing import StandardScaler\nX = train.values\nX_std = StandardScaler().fit_transform(X)\n\n# Calculating Eigenvectors and eigenvalues of Cov matirx\nmean_vec = np.mean(X_std, axis=0)\ncov_mat = np.cov(X_std.T)\neig_vals, eig_vecs = np.linalg.eig(cov_mat)\n# Create a list of (eigenvalue, eigenvector) tuples\neig_pairs = [ (np.abs(eig_vals[i]),eig_vecs[:,i]) for i in range(len(eig_vals))]\n\n# Sort the eigenvalue, eigenvector pair from high to low\neig_pairs.sort(key = lambda x: x[0], reverse= True)\n\n# Calculation of Explained Variance from the eigenvalues\ntot = sum(eig_vals)\nvar_exp = [(i/tot)*100 for i in sorted(eig_vals, reverse=True)] # Individual explained variance\ncum_var_exp = np.cumsum(var_exp) # Cumulative explained variance\n\n\n# Now having calculated both our Individual Explained Variance and Cumulative Explained Variance values, let's use the Plotly visualisation package to produce an interactive chart to showcase this.\n\n# In[ ]:\n\n\ntrace1 = go.Scatter(\n x=list(range(784)),\n y= cum_var_exp,\n mode='lines+markers',\n name=\"'Cumulative Explained Variance'\",\n hoverinfo= cum_var_exp,\n line=dict(\n shape='spline',\n color = 'goldenrod'\n )\n)\ntrace2 = go.Scatter(\n x=list(range(784)),\n y= var_exp,\n mode='lines+markers',\n name=\"'Individual Explained Variance'\",\n hoverinfo= var_exp,\n line=dict(\n shape='linear',\n color = 'black'\n )\n)\nfig = tls.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.5}],\n print_grid=True)\n\nfig.append_trace(trace1, 1, 1)\nfig.append_trace(trace2,1,1)\nfig.layout.title = 'Explained Variance plots - Full and Zoomed-in'\nfig.layout.xaxis = dict(range=[0, 80], title = 'Feature columns')\nfig.layout.yaxis = dict(range=[0, 60], title = 'Explained Variance')\nfig['data'] += [go.Scatter(x= list(range(784)) , y=cum_var_exp, xaxis='x2', yaxis='y2', name = 'Cumulative Explained Variance')]\nfig['data'] += [go.Scatter(x=list(range(784)), y=var_exp, xaxis='x2', yaxis='y2',name = 'Individual Explained Variance')]\n\n# fig['data'] = data\n# fig['layout'] = layout\n# fig['data'] += data2\n# fig['layout'] += layout2\npy.iplot(fig, filename='inset example')\n\n\n# *PLEASE CLICK AND MOVE THE SCATTER PLOTS ABOVE. THEY ARE INTERACTIVE. DOUBLE CLICK TO GET BACK TO THE ORIGINAL VIEW*\n\n# **Takeaway from the Plot**\n# \n# There are two plots above, a smaller one embedded within the larger plot. 
The smaller plot ( Green and Red) shows the distribution of the Individual and Explained variances across all features while the larger plot ( Golden and black ) portrays a zoomed section of the explained variances only.\n# \n# As we can see, out of our 784 features or columns approximately 90% of the Explained Variance can be described by using just over 200 over features. So if one wanted to implement a PCA on this, extracting the top 200 features would be a very logical choice as they already account for the majority of the data.\n# In the section below, I will use the immensely powerful Sklearn toolkit and its built-in PCA method. Unfortunately for brevity I will not be covering how to implement PCA from scratch, partly due to the multitude of resources already available. One excellent article to check out for this would be:\n# \n# [Principal Component Analysis in 3 Simple Steps][1] by Sebastian Raschka \n# \n# [1]: http://sebastianraschka.com/Articles/2015_pca_in_3_steps.html\n\n# **Visualizing the Eigenvalues**\n# \n# As alluded to above, since the PCA method seeks to obtain the optimal directions (or eigenvectors) that captures the most variance ( spreads out the data points the most ). Therefore it may be informative ( and cool) to visualise these directions and their associated eigenvalues. For the purposes of this notebook and for speed, I will invoke PCA to only extract the top 30 eigenvalues ( using Sklearn's .components_ call) from the digit dataset and visually compare the top 5 eigenvalues to some of the other smaller ones to see if we can glean any insights as follows:\n\n# In[ ]:\n\n\n# Invoke SKlearn's PCA method\nn_components = 30\npca = PCA(n_components=n_components).fit(train.values)\n\neigenvalues = pca.components_.reshape(n_components, 28, 28)\n\n# Extracting the PCA components ( eignevalues )\n#eigenvalues = pca.components_.reshape(n_components, 28, 28)\neigenvalues = pca.components_\n\n\n# In[ ]:\n\n\nn_row = 4\nn_col = 7\n\n# Plot the first 8 eignenvalues\nplt.figure(figsize=(13,12))\nfor i in list(range(n_row * n_col)):\n# for offset in [10, 30,0]:\n# plt.subplot(n_row, n_col, i + 1)\n offset =0\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(eigenvalues[i].reshape(28,28), cmap='jet')\n title_text = 'Eigenvalue ' + str(i + 1)\n plt.title(title_text, size=6.5)\n plt.xticks(())\n plt.yticks(())\nplt.show()\n\n\n# **Takeaway from the Plots**\n# \n# The subplots above portray the top 30 optimal directions or principal component axes that the PCA method has decided to generate for our digit dataset. Of interest is when one compares the first component \"Eigenvalue 1\" to the 28th component \"Eigenvalue 28\", it is obvious that more complicated directions or components are being generated in the search to maximise variance in the new feature subspace.\n\n# **Visualising the MNIST Digit set on its own**\n# \n# Now just for the fun and curiosity of it, let's plot the actual MNIST digit set to see what the underlying dataset actually represents, rather than being caught up with just looking at 1 and 0's.\n\n# In[ ]:\n\n\n# plot some of the numbers\nplt.figure(figsize=(14,12))\nfor digit_num in range(0,70):\n plt.subplot(7,10,digit_num+1)\n grid_data = train.iloc[digit_num].as_matrix().reshape(28,28) # reshape from 1d to 2d pixel array\n plt.imshow(grid_data, interpolation = \"none\", cmap = \"afmhot\")\n plt.xticks([])\n plt.yticks([])\nplt.tight_layout()\n\n\n# Phew, they are definitely digits all right. 
So let's proceed onto the main event.\n\n# ###PCA Implementation via Sklearn\n# \n# Now using the Sklearn toolkit, we implement the Principal Component Analysis algorithm as follows:\n\n# In[ ]:\n\n\n# Delete our earlier created X object\ndel X\n# Taking only the first N rows to speed things up\nX= train[:6000].values\ndel train\n# Standardising the values\nX_std = StandardScaler().fit_transform(X)\n\n# Call the PCA method with 5 components. \npca = PCA(n_components=5)\npca.fit(X_std)\nX_5d = pca.transform(X_std)\n\n# For cluster coloring in our Plotly plots, remember to also restrict the target values \nTarget = target[:6000]\n\n\n# What the chunk of code does above is to first normalise the data (actually no need to do so for this data set as they are all 1's and 0's) using Sklearn's convenient StandardScaler call.\n# \n# Next we invoke Sklearn's inbuilt PCA function by providing into its argument *n_components*, the number of components/dimensions we would like to project the data onto. In practise, one would educate and motivate the choice of components for example by looking at the proportion of variance captured vs each feature's eigenvalue, such as in our Explained Variance plots. To be honest, there are a multitude of papers in the literature with research on what should be a good indicator on choice of components. Here are some references for the interested: However for the essence of this notebook being a guide of sorts, I have just decided to take a PCA on 5 components ( against perhaps taking 200 over components).\n# \n# Finally I call both fit and transform methods which fits the PCA model with the standardised digit data set and then does a transformation by applying the dimensionality reduction on the data.\n\n# ###Interactive visualisations of PCA representation\n# \n# When it comes to these dimensionality reduction methods, scatter plots are most commonly implemented because they allow for great and convenient visualisations of clustering ( if any existed ) and this will be exactly what we will be doing as we plot the first 2 principal components as follows:\n\n# In[ ]:\n\n\ntrace0 = go.Scatter(\n x = X_5d[:,0],\n y = X_5d[:,1],\n name = Target,\n hoveron = Target,\n mode = 'markers',\n text = Target,\n showlegend = False,\n marker = dict(\n size = 8,\n color = Target,\n colorscale ='Jet',\n showscale = False,\n line = dict(\n width = 2,\n color = 'rgb(255, 255, 255)'\n ),\n opacity = 0.8\n )\n)\ndata = [trace0]\n\nlayout = go.Layout(\n title= 'Principal Component Analysis (PCA)',\n hovermode= 'closest',\n xaxis= dict(\n title= 'First Principal Component',\n ticklen= 5,\n zeroline= False,\n gridwidth= 2,\n ),\n yaxis=dict(\n title= 'Second Principal Component',\n ticklen= 5,\n gridwidth= 2,\n ),\n showlegend= True\n)\n\n\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='styled-scatter')\n\n\n# *PLEASE CLICK AND MOVE THE SCATTER PLOTS ABOVE. THEY ARE INTERACTIVE. DOUBLE CLICK TO GET BACK TO THE ORIGINAL VIEW*\n\n# **Takeaway from the Plot**\n# \n# As observed from the scatter plot, you can just about make out a few discernible clusters evinced from the collective blotches of colors. 
These clusters represent the underlying digit that each data point should contribute to and one may therefore be tempted to think that it was quite a piece of cake in implementing and visualising PCA in this section.\n# \n# However, the devil lies in the tiny details of the python implementation because as alluded to earlier, PCA is actually in fact an unsupervised method which does not depend on class labels. I have sneakily snuck in class labelings whilst generating the scatter plots therefore resulting in the clusters of colours as you see them.\n\n# ###K-Means Clustering to identify possible classes\n# \n# Imagine just for a moment that we were not provided with the class labels to this digit set because after all PCA is an unsupervised method. Therefore how would we be able to separate out our data points in the new feature space? We can apply a clustering algorithm on our new PCA projection data and hopefully arrive at distinct clusters which would tell us something about the underlying class separation in the data. \n# \n# To start off, we set up a KMeans clustering method with Sklearn's *KMeans* call and use the *fit_predict* method to compute cluster centers and predict cluster indices for the first and second PCA projections (to see if we can observe any appreciable clusters).\n\n# In[ ]:\n\n\nfrom sklearn.cluster import KMeans # KMeans clustering \n# Set a KMeans clustering with 9 components ( 9 chosen sneakily ;) as hopefully we get back our 9 class labels)\nkmeans = KMeans(n_clusters=9)\n# Compute cluster centers and predict cluster indices\nX_clustered = kmeans.fit_predict(X_5d)\n\ntrace_Kmeans = go.Scatter(x=X_5d[:, 0], y= X_5d[:, 1], mode=\"markers\",\n showlegend=False,\n marker=dict(\n size=8,\n color = X_clustered,\n colorscale = 'Portland',\n showscale=False, \n line = dict(\n width = 2,\n color = 'rgb(255, 255, 255)'\n )\n ))\n\nlayout = go.Layout(\n title= 'KMeans Clustering',\n hovermode= 'closest',\n xaxis= dict(\n title= 'First Principal Component',\n ticklen= 5,\n zeroline= False,\n gridwidth= 2,\n ),\n yaxis=dict(\n title= 'Second Principal Component',\n ticklen= 5,\n gridwidth= 2,\n ),\n showlegend= True\n)\n\ndata = [trace_Kmeans]\nfig1 = dict(data=data, layout= layout)\n# fig1.append_trace(contour_list)\npy.iplot(fig1, filename=\"svm\")\n\n\n# **Takeaway from the Plot**\n# \n# Visually, the clusters generated by the KMeans algorithm appear to provide a clearer demarcation amongst clusters as compared to naively adding in class labels into our PCA projections. This should come as no surprise as PCA is meant to be an unsupervised method and therefore not optimised for separating different class labels. This particular task however is accomplished by the very next method that we will talk about.\n\n# #2. Linear Discriminant Analysis (LDA)\n# \n# LDA, much like PCA is also a linear transformation method commonly used in dimensionality reduction tasks. However unlike the latter which is an unsupervised learning algorithm, LDA falls into the class of supervised learning methods. As such the goal of LDA is that with available information about class labels, LDA will seek to maximise the separation between the different classes by computing the component axes (linear discriminants ) which does this.\n\n# In[ ]:\n\n\nfrom IPython.display import display, Math, Latex\n\n\n# ### LDA Implementation from Scratch\n# \n# The objective of LDA is to preserve the class separation information whilst still reducing the dimensions of the dataset. 
As such implementing the method from scratch can roughly be split into 4 distinct stages as below. As an aside, since this section will be quite equation heavy therefore we will also be embedding some mathematical equations into the upcoming sections. The good thing about IPython notebook is that you can render your equations (LaTeX) automatically by putting them within the **$$** symbol, courtesy of the use of MathJax - a JavaScript equation display engine.\n# \n# **A. Projected Means**\n# \n# Since this method was designed to take into account class labels we therefore first need to establish a suitable metric with which to measure the 'distance' or separation between different classes. Let's assume that we have a set of data points *x* that belong to one particular class *w*. Therefore in LDA the first step is to the project these points onto a new line, Y that contains the class-specific information via the transformation \n# \n# $$Y = \\omega^\\intercal x $$\n# \n# With this the idea is to find some method that maximises the separation of these new projected variables. To do so, we first calculate the projected mean.\n# \n# **B. Scatter Matrices and their solutions**\n# Having introduced our projected means, we now need to find a function that can represent the difference between the means and then maximise it. Like in linear regression, where the most basic case is to find the line of best fit we need to find the equivalent of the variance in this context. And hence this is where we introduce scatter matrices where the scatter is the equivalent of the variance.\n# \n# $$ \\tilde{S}^{2} = (y - \\tilde{mu})^{2}$$\n# \n# \n# **C. Selecting Optimal Projection Matrices**\n# \n# **D. Transforming features onto new subspace**\n\n# *SECTION STILL UNDER-WAY*\n\n# ###LDA Implementation via Sklearn\n# \n# Having gone through the nitty-gritty details of the LDA implementation in theory, let us now implement the method in practise. Surprise, surprise we find that the Sklearn toolkit also comes with its own inbuilt LDA function and hence we invoke an LDA model as follows:\n\n# In[ ]:\n\n\nlda = LDA(n_components=5)\n# Taking in as second argument the Target as labels\nX_LDA_2D = lda.fit_transform(X_std, Target.values )\n\n\n# The syntax for the LDA implementation is very much akin to that of PCA whereby one calls the fit and transform methods which fits the LDA model with the data and then does a transformation by applying the LDA dimensionality reduction to it. 
However since LDA is a supervised learning algorithm , there is a second argument to the method that the user must provide and this would be the class labels, which in this case is the target labels of the digits.\n\n# ###Interactive visualisations of LDA representation\n\n# In[ ]:\n\n\n8# Using the Plotly library again\ntraceLDA = go.Scatter(\n x = X_LDA_2D[:,0],\n y = X_LDA_2D[:,1],\n name = Target,\n# hoveron = Target,\n mode = 'markers',\n text = Target,\n showlegend = True,\n marker = dict(\n size = 8,\n color = Target,\n colorscale ='Jet',\n showscale = False,\n line = dict(\n width = 2,\n color = 'rgb(255, 255, 255)'\n ),\n opacity = 0.8\n )\n)\ndata = [traceLDA]\n\nlayout = go.Layout(\n title= 'Linear Discriminant Analysis (LDA)',\n hovermode= 'closest',\n xaxis= dict(\n title= 'First Linear Discriminant',\n ticklen= 5,\n zeroline= False,\n gridwidth= 2,\n ),\n yaxis=dict(\n title= 'Second Linear Discriminant',\n ticklen= 5,\n gridwidth= 2,\n ),\n showlegend= False\n)\n\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='styled-scatter')\n\n\n# *PLEASE CLICK AND MOVE THE SCATTER PLOTS ABOVE. THEY ARE INTERACTIVE. DOUBLE CLICK TO GET BACK TO THE ORIGINAL VIEW*\n\n# From the scatter plot above, we can see that the data points are more clearly clustered when using LDA with as compared to implementing PCA with class labels. This is an inherent advantage in having class labels to supervise the method with. In short picking the right tool for the right job.\n\n# #3. T-SNE ( t-Distributed Stochastic Neighbour Embedding )\n# \n# The t-SNE method has become widely popular ever since it was introduced by van der Maaten and Hinton in 2008. Unlike the previous two linear methods of PCA and LDA discussed above, t-SNE is a non-linear, probabilistic dimensionality reduction method.\n# \n# The internal mechanisms of the algorithm \n# Therefore instead of looking at directions/axes which maximise information or class separation, T-SNE aims to convert the Euclidean distances between points into conditional probabilities. A Student-t distribution is then applied on these probabilities which serve as metrics to calculate the similarity between one datapoint to another. \n# \n# However this brief summary does no justice in any manner or shape to the original t-SNE paper by Maaten and Hinton so please do check the original out [here][1].\n# \n# [1]: http://www.cs.toronto.edu/~hinton/absps/tsne.pdf\n\n# In[ ]:\n\n\n# Invoking the t-SNE method\ntsne = TSNE(n_components=2)\ntsne_results = tsne.fit_transform(X_std) \n\n\n# Having invoked the t-SNE algorithm by simply calling *TSNE()* we fit the digit data to the model and reduce its dimensions with *fit_transform*. Finally let's plot the first two components in the new feature space in a scatter plot\n\n# In[ ]:\n\n\ntraceTSNE = go.Scatter(\n x = tsne_results[:,0],\n y = tsne_results[:,1],\n name = Target,\n hoveron = Target,\n mode = 'markers',\n text = Target,\n showlegend = True,\n marker = dict(\n size = 8,\n color = Target,\n colorscale ='Jet',\n showscale = False,\n line = dict(\n width = 2,\n color = 'rgb(255, 255, 255)'\n ),\n opacity = 0.8\n )\n)\ndata = [traceTSNE]\n\nlayout = dict(title = 'TSNE (T-Distributed Stochastic Neighbour Embedding)',\n hovermode= 'closest',\n yaxis = dict(zeroline = False),\n xaxis = dict(zeroline = False),\n showlegend= False,\n\n )\n\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='styled-scatter')\n\n\n# *PLEASE CLICK AND MOVE THE SCATTER PLOTS ABOVE. THEY ARE INTERACTIVE. 
DOUBLE CLICK TO GET BACK TO THE ORIGINAL VIEW*\n\n# **Takeaway from the Plots**\n# \n# From the t-SNE scatter plot the first thing that strikes is that clusters ( and even subclusters ) are very well defined and segregated resulting in Jackson-Pollock like Modern Art visuals, even more so than the PCA and LDA methods. This ability to provide very good cluster visualisations can be boiled down to the topology-preserving attributes of the algorithm. \n# \n# However t-SNE is not without its drawbacks. Multiple local minima may occur as the algorithm is identifying clusters/sub-clusters and this can be evinced from the scatter plot, where we can see that clusters of the same colour exist as 2 sub-clusters in different areas of the plot.\n\n# # Concluding Remarks\n# \n# In conclusion, this notebook has introduced and briefly covered three different dimensionality reduction methods commonly used by ML practitioners - PCA, LDA and t-SNE. We touched on the concepts of finding principal components and linear discriminants as well as the topology preserving capabilities of t-SNE. We've also discussed the relative merits of using supervised and unsupervised methods as well as the KMeans clustering technique when it comes to an unsupervised scenario. \n# \n# Apart from these three common reduction methods, there exists a whole host of other dimensionality reduction methods not discussed in this notebook. Just to name a few, they include methods like Sammon's Mapping, Multi-dimensional Scaling or even some graph based visualisation methods. \n# \n# I hope this notebook has been useful especially with regards to introducing the concepts of dimensionality reduction if it is new to you. And the key takeaway would be that each method has its own pros and cons and are not to be used as a one-size-fits-all/I-will-only-use-my-favourite method but rather to be implemented as the situation calls for it. 
\n# \n# Peace out \n","repo_name":"adgirish/kaggleScape","sub_path":"data/script364.py","file_name":"script364.py","file_ext":"py","file_size_in_byte":26472,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"23"} +{"seq_id":"72492680700","text":"#!/usr/bin/python3.6\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n给定一个整型数组, 数组元素随机无序的, 要求打印出所有元素右边第一个大于该元素的值, 如果不存在, 输出None\n\n如数组A=[1,5,3,6,4,8,9,10] 输出[5, 6, 6, 8, 8, 9, 10, None]\n\n如数组A=[8, 2, 5, 4, 3, 9, 7, 2, 5] 输出[9, 5, 9, 9, 9, None, None, 5, None]\n\n要求: 时间复杂度为O(n)\n\"\"\"\n\n\ndef solution1(nums: list) -> list:\n # O(n ^ 2)\n ans = []\n for idx, i in enumerate(nums[:-1]):\n v = None\n for j in nums[idx + 1:]:\n if j > i:\n v = j\n break\n ans.append(v)\n ans.append(None)\n return ans\n\n\ndef solution2(nums: list) -> list:\n # O(n), 单调栈 - 递减\n size = len(nums)\n ans = [None] * size\n # 栈里面存放索引\n stack = [0]\n idx = 1\n while idx < size:\n while stack and idx < size and nums[idx] < nums[stack[-1]]: # 单调递减\n stack.append(idx)\n idx += 1\n # end while\n while stack and idx < size and nums[idx] > nums[stack[-1]]: # 弹出元素直至单调递减\n ans[stack[-1]] = nums[idx]\n stack.pop()\n # end while\n stack.append(idx)\n idx += 1\n # end while\n return ans\n\nif __name__ == '__main__':\n import random\n a = [random.randint(0, 100) for _ in range(30)]\n r1 = solution1(a)\n r2 = solution2(a)\n print(r1 == r2)\n print(a)\n print(r1)\n print(r2)\n","repo_name":"liujunsheng0/notes","sub_path":"lintcode/right_first_bigger.py","file_name":"right_first_bigger.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"23"} +{"seq_id":"17561990900","text":"import asyncio\n\nfrom bluefly.motor import MotorDevice\nfrom bluefly.simprovider import SimProvider\n\n\ndef sim_motor_logic(\n p: SimProvider, motor: MotorDevice, velocity=1, precision=3, units=\"mm\"\n):\n mr = motor.motor\n p.set_value(mr.velocity, velocity)\n p.set_value(mr.max_velocity, velocity)\n p.set_value(mr.precision, precision)\n p.set_value(mr.egu, units)\n\n task = None\n\n @p.on_set(mr.demand)\n async def do_move(new_position):\n async def actually_do_move():\n p.set_value(mr.done_move, 0)\n old_position = p.get_value(mr.readback)\n velocity = p.get_value(mr.velocity)\n # Don't try to be clever, just move at a constant velocity\n move_time = (new_position - old_position) / velocity\n for i in range(int(move_time / 0.1)):\n p.set_value(mr.readback, old_position + i * 0.1 * velocity)\n await asyncio.sleep(0.1)\n p.set_value(mr.readback, new_position)\n p.set_value(mr.done_move, 1)\n\n nonlocal task\n task = asyncio.create_task(actually_do_move())\n try:\n await task\n except asyncio.CancelledError:\n pass\n\n @p.on_call(mr.stop)\n async def do_stop():\n if task:\n task.cancel()\n","repo_name":"dls-controls/bluefly","sub_path":"bluefly/motor_sim.py","file_name":"motor_sim.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"44170542939","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport copy, os\n\nthisDir = os.getcwd()\n#read the current pyqt version\nf = open('../pychoacoustics/pyqtver.py', 'r')\npyqtverLines = f.readlines()\npyqtverLinesPyside = copy.copy(pyqtverLines)\nf.close()\nfor i in range(len(pyqtverLinesPyside)):\n if pyqtverLinesPyside[i].strip().split('=')[0].strip() == \"pyqtversion\":\n pyqtverLinesPyside[i] = \"pyqtversion = -4\\n\"\n\n#Change pyqtver to pyside\nf = open('../pychoacoustics/pyqtver.py', 'w')\nf.writelines(pyqtverLinesPyside)\nf.close()\n\nos.system('pyside-rcc -py3 -o ../pychoacoustics/qrc_resources.py ../resources.qrc')\nos.system('pyside-lupdate -verbose pychoacoustics.pro')\nos.system('lrelease -verbose pychoacoustics.pro')\nos.system('mv *.qm ../translations/')\n\nos.chdir('../')\nos.system('python3 setup-pyside.py sdist --formats=gztar,zip')\n#os.system('python3 setup-pyside.py bdist_wininst')\n\n#revert to pyqt5\n\nfor i in range(len(pyqtverLines)):\n if pyqtverLines[i].strip().split('=')[0].strip() == \"pyqtversion\":\n pyqtverLines[i] = \"pyqtversion = 5\\n\"\n\nos.chdir('prep-release')\nf = open('../pychoacoustics/pyqtver.py', 'w')\nf.writelines(pyqtverLines)\nf.close()\n\nos.system('pyrcc5 -py3 -o ../pychoacoustics/qrc_resources.py ../resources.qrc')\nos.system('pylupdate5 -verbose pychoacoustics.pro')\nos.system('lrelease -verbose pychoacoustics.pro')\nos.system('mv *.qm ../translations/')\n","repo_name":"VibrasticLab/pychoacoustics","sub_path":"prep-release/do_pyside.py","file_name":"do_pyside.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"23"} +{"seq_id":"427479070","text":"from queue import Queue\nfrom typing import Sequence, Union, List\n\nfrom rlcard.envs import Env\nimport tensorflow as tf\n\nfrom rlcard_thousand_schnapsen.api.dto import PlayerType\nfrom rlcard_thousand_schnapsen.api.agents import HumanAgent, DeepCFRAgent, RandomAgent\n\nAgent = Union[HumanAgent, DeepCFRAgent, RandomAgent]\n\n\ndef load_agent(player_type: PlayerType, index: int, env: Env, sess: tf.Session,\n action_queue: Queue) -> Agent:\n if player_type == PlayerType.Human:\n return HumanAgent(action_queue)\n if player_type == PlayerType.DeepCfr:\n return DeepCFRAgent(sess, scope='deep_cfr' + str(index), env=env)\n return RandomAgent(env.action_num)\n\n\ndef load_agents(player_types: Sequence[PlayerType], env: Env, sess: tf.Session,\n action_queue: Queue) -> List[Agent]:\n return [\n load_agent(player_type, index, env, sess, action_queue)\n for index, player_type in enumerate(player_types)\n ]\n","repo_name":"adomanska/rlcard-thousand-schnapsen","sub_path":"rlcard_thousand_schnapsen/api/utils/load_agents.py","file_name":"load_agents.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"39687254353","text":"import discord\r\n\r\nfrom discord.ext import commands\r\n\r\nTOKEN = \"\"\r\n\r\nclient = commands.Bot(command_prefix = \"-\") #prefix \r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"bot is ready as {0.user}\".format(client))\r\n\r\n#works now\r\n@client.event\r\nasync def on_message(message):\r\n username = str(message.author).split(\"#\")[0] \r\n user_message = str(message.content)\r\n channel = str(message.channel.name)\r\n print(f\"{username} in ({channel}): {user_message}\")\r\n\r\n if message.author == client.user:\r\n return\r\n \r\n #Hello\r\n if user_message.lower() == \"hello\": \r\n await 
message.channel.send(f\"Hello {username}\")\r\n return \r\n \r\n #Respond\r\n elif user_message.lower() == \"respond\":\r\n await message.author.send(f\"**_responded_**!\")\r\n return\r\n \r\n #Club tag\r\n elif user_message.lower() == \"club tag\":\r\n await message.channel.send(f\"#VCYJV99Q\")\r\n return\r\n\r\n#has not been tested\r\n@client.event\r\nasync def on_member_join(member):\r\n print(f\"{member} welcome to the server\")\r\n\r\n#has not been tested\r\n@client.event\r\nasync def on_member_leave(member):\r\n print(f\"Sad to see you go {member} \")\r\n\r\n\r\n#issue\r\n@client.command()\r\nasync def ping(ctx):\r\n await ctx.send(\"pong\")\r\n\r\n\r\nclient.run(TOKEN)","repo_name":"Sehnyu/Discord-py-bot-issus","sub_path":"git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"20530341254","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.models import load_model\nimport keras.backend as K\nfrom PIL import Image as im\nimport os\n\n# Set size of images to be used\nshape2 = (180, 180) # Only uses width and height\nshape3 = (180, 180, 1) # Includes depth\nUPLOAD_FOLDER='./upload/'\nmodel_loc=\"DogCatCNN3-19.tf\"\n#imageLoc = \"images/Chloe1.jpg\"\nfinalImg = \"\"\nprediction = \"\"\n\ndef imageProcessing(imageLoc):\n preproc_image = tf.keras.preprocessing.image.load_img(\n imageLoc, color_mode=\"grayscale\", target_size=shape2\n )\n imgArr = image.img_to_array(preproc_image)\n imgEx = np.expand_dims(imgArr, axis=0)\n return imgEx\n finalImg = imgEx\n #predict(imgEx)\n\n\ndef predict(filename):\n path = os.path.join(UPLOAD_FOLDER, filename)\n img = imageProcessing(path)\n model = load_model(model_loc)\n prediction = model.predict(img)\n d = prediction.flatten()\n j = d.max()\n if(j > 0.5):\n return \"Dog!\"\n prediction = \"Dog!\"\n elif (j == 0.5):\n return \"Too close to call!\"\n prediction = \"Too close to call!\"\n else:\n return \"Cat!\"\n prediction = \"Cat!\"\n","repo_name":"ZCKaufman/DogOrCatClassifier","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"25048952153","text":"\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom dwt.wlcbam import wa_module\n# 基本卷积块\nclass Conv(nn.Module):\n def __init__(self, C_in, C_out):\n super(Conv, self).__init__()\n self.layer = nn.Sequential(\n\n nn.Conv2d(C_in , C_out, 3, 1, 1),\n nn.BatchNorm2d(C_out),\n # 防止过拟合\n nn.Dropout(0.3),\n nn.LeakyReLU(),\n\n nn.Conv2d(C_out, C_out, 3, 1, 1),\n nn.BatchNorm2d(C_out),\n # 防止过拟合\n nn.Dropout(0.4),\n nn.LeakyReLU(),\n )\n\n def forward(self, x):\n return self.layer(x)\n\n\n# 下采样模块\nclass DownSampling(nn.Module):\n def __init__(self, C):\n super(DownSampling, self).__init__()\n self.Down = nn.Sequential(\n # 使用卷积进行2倍的下采样,通道数不变\n nn.Conv2d(C, C, 3, 2, 1),\n nn.LeakyReLU()\n )\n\n def forward(self, x):\n return self.Down(x)\n\n# 基于小波变换的下采样技术\nclass DownSampling_wa(nn.Module):\n def __init__(self, C,device):\n super(DownSampling_wa, self).__init__()\n self.dev=device\n self.wa=wa_module(device=self.dev)\n self.Up = nn.Conv2d(C, C // 2, 1, 1)\n self.pool = nn.MaxPool2d(kernel_size=2)\n def forward(self, x):\n down_fea,skip_fea=self.wa(x)[0],self.wa(x)[1]\n return 
down_fea,skip_fea\n\n\n\n# 上采样模块\nclass UpSampling(nn.Module):\n\n def __init__(self, C):\n super(UpSampling, self).__init__()\n # 特征图大小扩大2倍,通道数减半\n self.Up = nn.Conv2d(C, C // 2, 1, 1)\n\n def forward(self, x, r):\n up = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n x = self.Up(up)\n return torch.cat((x, r), 1)\n\n\n# 主干网络\nclass denosing_module(nn.Module):\n\n def __init__(self,device,inchannel):\n super(denosing_module, self).__init__()\n self.dev=device\n # 4次下采样\n self.C1 = Conv(inchannel, 64)\n self.D1 = DownSampling_wa(64,device=self.dev)\n # self.D1 = DownSampling(64)\n self.C2 = Conv(64, 128)\n self.D2 = DownSampling_wa(128,device=self.dev)\n # self.D2 = DownSampling(128)\n self.C3 = Conv(128, 256)\n self.D3 = DownSampling_wa(256,device=self.dev)\n # self.D3 = DownSampling(256)\n self.C4 = Conv(256, 512)\n self.U2 = UpSampling(512)\n self.C7 = Conv(512, 256)\n self.U3 = UpSampling(256)\n self.C8 = Conv(256, 128)\n self.U4 = UpSampling(128)\n self.C9 = Conv(128, 64)\n\n self.Th = torch.nn.Sigmoid()\n self.pred = torch.nn.Conv2d(64, 1, 3, 1, 1)\n\n\n # 这是采用小波变换模块的前向传播\n def forward(self, x):\n R1 = self.C1(x)#torch.Size([1, 64, 512, 512])\n R2 = self.C2(self.D1(R1)[0])#torch.Size([1, 128, 256, 256])\n R3 = self.C3(self.D2(R2)[0])#torch.Size([1, 256, 128, 128])\n R4 = self.C4(self.D3(R3)[0])#torch.Size([1, 512, 64, 64])\n\n # skip feature是小波变换的底层信号\n O2 = self.C7(self.U2(R4,self.C3(self.D2(R2)[1])))\n O3 = self.C8(self.U3(O2,self.C2(self.D1(R1)[1])))\n O4 = self.C9(self.U4(O3,R1))\n\n ## 没有小波变换的skip feature\n # O2 = self.C7(self.U2(R4, R3))\n # O3 = self.C8(self.U3(O2, R2))\n # O4 = self.C9(self.U4(O3, R1))\n\n return self.Th(self.pred(O4))\n\n # def forward(self, x):\n # R1 = self.C1(x)\n # R2 = self.C2(self.D1(R1))\n # R3 = self.C3(self.D2(R2))\n # R4 = self.C4(self.D3(R3))\n\n # O2 = self.C7(self.U2(R4, R3))\n # O3 = self.C8(self.U3(O2, R2))\n # O4 = self.C9(self.U4(O3, R1))\n\n # return self.Th(self.pred(O4))\n \nif __name__=='__main__':\n x=torch.randn(1,1,512,512)\n model=denosing_module(device='cpu',inchannel=1)(x)\n print(model)\n print(model.shape)","repo_name":"xuecheng990531/EWSNet","sub_path":"models/refine.py","file_name":"refine.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"23"} +{"seq_id":"36491994256","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 10 13:38:06 2021\n\n@author: lijiaojiao\n\n社会网络分析之中心性分析:\n度数中心性(degree centrality)、接近中心性(clonessness centrality)、中介中心性(betweenness centrality)、特征向量中心性(eigenvector_centrality)以及PageRank\n\n\"\"\"\nfrom operator import itemgetter\nimport networkx as nx\nimport matplotlib.pyplot as plot\n\n######## 1. 
读取数据 ##########\ng = nx.read_pajek(\"D:\\\\program\\\\networks\\data\\\\russians.net\")\nlens = len(g) # 读取图的节点数\n# print(lens)\n\n# 各节点中心性(度数中心性、接近中心性、中介中心性)值的排序\ndef sorted_map(map):\n ms = sorted(map.items(),key= lambda d:d[1],reverse=True) # lambda d:d[1]按照字典中的值排序,reverse=True降序排序\n return ms\n\n\n######## 2.度数中心性(degree centrality)计算 ##########\n\n#度数中心性的计算\nd1 = nx.degree(g) # d1为网络图的数据,主要是节点及边的数量,其类型为networkx.classes.reportviews.MultiDegreeView,是Iterator类型。\nd = dict(d1) #deg 是Iterator类型,需要转换为dict类型,才可以进行进一步计算\n#度数中心性的最大最小值\nd_min = min(d.values()) #度数中心性(degree centrality)的最小值\nd_max = max(d.values()) #度数中心性(degree centrality)的最大值\n#度数中心性的排序\nds = sorted_map(d) #各节点的度数中心性(degree centrality)的列表,降序排序\nprint(\"度数中心性前10的节点为:\" )\nprint(ds[0:10])\n#各节点度数中心性的分布情况\nh = plot.hist(dict(d).values(),100)\nprint(plot.loglog(h[1][1:],h[0]))\n\n# 删除指定节点(例如,度数为1的节点),返回新的网络图\ndef trim_degrees(g,degree):\n g2 = g.copy()\n d = nx.degree(g2)\n for n in list(g2.nodes()):\n if d[n] <= degree:\n g2.remove_node(n)\n return g2\n\n# 删除度数为1的节点\ncore1 = trim_degrees(g,1)\n\n# 删除度数为10 的节点\ncore10 = trim_degrees(g,10)\nlen(core10)\n\n#绘制度数大于10 的节点的网络图(中心性网络图)\nprint(nx.draw(core10,with_labels=True))\n\n\n######## 3.接近中心性(clonessness centrality)计算 ##########\n\nc = nx.closeness_centrality(core10) #计算接近中心性\ncs = sorted_map(c) #对结果进行排序\nprint(\"接近中心性前10的节点为:\" )\nprint(cs[0:10])\n\nprint(plot.hist(dict(c).values()) ) #接近中心性的分布\n\n######## 4.中介中心性 betweenness centrality计算 ##########\n\ncore11 = nx.DiGraph(core10) #数据类型转化\nb = nx.betweenness_centrality(core11) # 计算中介中心性\nbs = sorted_map(b) #排序\nprint(\"中介中心性前10的节点为:\" )\nprint(bs[:10])\n\n\n######## 5. 三种中心性的合并 ##########\n#创建列表,存储三种不同中心性测量值的前10\nname1 = [x[0] for x in ds[:10]]\nname2 = [x[0] for x in cs[:10]]\nname3 = [x[0] for x in bs[:10]]\n# 使用Python的set函数将三组列表拼到一起\nnames = list(set(name1) | set(name2) | set(name3))\n#创建中心性列表。 d,c,b分别为对应的字典\ntable = [[name,d[name],c[name],b[name]] for name in names]\nprint(\"该网络图度数中心性(degree centrality)、接近中心性(clonessness centrality)、接近中心性(clonessness centrality)、中介中心性(betweenness centrality)为:\" )\nprint(table)\n\n\n######## 6. 特征向量中心性(eigenvector_centrality)计算 ##########\ne = nx.eigenvector_centrality(core11) #计算特征向量中心性\nes = sorted_map(e) #排序\nprint(\"特征向量中心性前10的节点为:\" )\nprint(es[:10])\n\n\n######## 7. PageRank 算法 ##########\np = nx.pagerank(core11)\npr = sorted_map(p)\nprint(\"PageRank 算法计算得前10的节点为:\" )\nprint(pr[:10])\n\n######## 5. 
五种中心性的合并 ##########\n#创建列表,存储五种不同中心性测量值的前10\nname1 = [x[0] for x in ds[:10]]\nname2 = [x[0] for x in cs[:10]]\nname3 = [x[0] for x in bs[:10]]\nname4 = [x[0] for x in es[:10]]\nname5 = [x[0] for x in pr[:10]]\n# 使用Python的set函数将五组列表拼到一起\nnames = list(set(name1) | set(name2) | set(name3) | set(name4) | set(name5))\n#创建中心性列表。:\ntable = [[name,d[name],c[name],b[name],e[name],p[name]] for name in names]\nprint(\"五种中心性列表为:\" )\nprint(table)","repo_name":"ljiaoer/networks","sub_path":"centrality/nx_centrality.py","file_name":"nx_centrality.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"14461768505","text":"import os\nimport setuptools\n\ndescription = 'Simulates the time evolution of a 1-dimensional wave packet in an arbitrary time-independent potential'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nwith open('requirements.txt') as f:\n required = f.read().splitlines()\n\nsetuptools.setup(\n name='wave_packet_dynamics',\n version='0.2.0',\n url='https://github.com/Rastow/wave-packet-dynamics',\n author='Robert Grzonka',\n author_email='robert.grzonka@fau.de',\n description=description,\n long_description=long_description,\n long_description_content_type='text/x-rst',\n keywords=[\"quantum-mechanics\", \"quantum-chemistry\", \"physics-simulation\", \"schroedinger-equation\"],\n packages=setuptools.find_packages(),\n license='MIT',\n python_requires='>=3.8',\n install_requires=required,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n","repo_name":"Rastow/wave-packet-dynamics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"30719335041","text":"\"\"\"\nThese settings are used by the ``manage.py`` command.\n\nWith normal tests we want to use the fastest possible way which is an\nin-memory sqlite database but if you want to create South migrations you\nneed a persistant database.\n\n\"\"\"\nfrom .test_settings import * # NOQA\n\nfrom django.contrib import messages\n\nREQUIRED_APPS = [\n\n]\n\n\nINSTALLED_APPS = INSTALLED_APPS + REQUIRED_APPS\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'db.sqlite',\n }\n}\n\n","repo_name":"amgcorp-tech/djv-example","sub_path":"djv_example/tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"8818861530","text":"import os\nimport sys\nfrom collections import namedtuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom keras_xlnet.backend import keras\nfrom keras_bert.layers import Extract\nfrom keras_xlnet import Tokenizer, load_trained_model_from_checkpoint, ATTENTION_TYPE_BI\nfrom keras_radam import RAdam\n\nEPOCH = 10\nBATCH_SIZE = 4\nSEQ_LEN = 256\nMODEL_NAME = 'ChnSentiCorp.h5'\n\n\nif len(sys.argv) != 2:\n print('python csc.py PRETRAINED_PATH')\n\n\npretrained_path = sys.argv[1]\nPretrainedPaths = namedtuple('PretrainedPaths', ['config', 'model', 'vocab'])\nconfig_path = os.path.join(pretrained_path, 
'xlnet_config.json')\nmodel_path = os.path.join(pretrained_path, 'xlnet_model.ckpt')\nvocab_path = os.path.join(pretrained_path, 'spiece.model')\npaths = PretrainedPaths(config_path, model_path, vocab_path)\ntokenizer = Tokenizer(paths.vocab)\n\n\n# Read data\nclass DataSequence(keras.utils.Sequence):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __len__(self):\n return (len(self.y) + BATCH_SIZE - 1) // BATCH_SIZE\n\n def __getitem__(self, index):\n s = slice(index * BATCH_SIZE, (index + 1) * BATCH_SIZE)\n return [item[s] for item in self.x], self.y[s]\n\n\ndef generate_sequence(path):\n tokens, classes = [], []\n df = pd.read_csv(path, sep='\\t', error_bad_lines=False)\n for _, row in df.iterrows():\n text, cls = row['text_a'], row['label']\n encoded = tokenizer.encode(text)[:SEQ_LEN - 1]\n encoded = [tokenizer.SYM_PAD] * (SEQ_LEN - 1 - len(encoded)) + encoded + [tokenizer.SYM_CLS]\n tokens.append(encoded)\n classes.append(int(cls))\n tokens, classes = np.array(tokens), np.array(classes)\n segments = np.zeros_like(tokens)\n segments[:, -1] = 1\n lengths = np.zeros_like(tokens[:, :1])\n return DataSequence([tokens, segments, lengths], classes)\n\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\ntrain_seq = generate_sequence(os.path.join(current_path, 'train.tsv'))\ndev_seq = generate_sequence(os.path.join(current_path, 'dev.tsv'))\ntest_seq = generate_sequence(os.path.join(current_path, 'test.tsv'))\n\n\n# Load pretrained model\nmodel = load_trained_model_from_checkpoint(\n config_path=paths.config,\n checkpoint_path=paths.model,\n batch_size=BATCH_SIZE,\n memory_len=0,\n target_len=SEQ_LEN,\n in_train_phase=False,\n attention_type=ATTENTION_TYPE_BI,\n)\n\n\n# Build classification model\nlast = model.output\nextract = Extract(index=-1, name='Extract')(last)\ndense = keras.layers.Dense(units=768, name='Dense')(extract)\nnorm = keras.layers.BatchNormalization(name='Normal')(dense)\noutput = keras.layers.Dense(units=2, activation='softmax', name='Softmax')(norm)\nmodel = keras.models.Model(inputs=model.inputs, outputs=output)\nmodel.summary()\n\n\n# Fit model\nif os.path.exists(MODEL_NAME):\n model.load_weights(MODEL_NAME)\n\nmodel.compile(\n optimizer=RAdam(lr=2e-5),\n loss='sparse_categorical_crossentropy',\n metrics=['sparse_categorical_accuracy'],\n)\n\nmodel.fit_generator(\n generator=train_seq,\n validation_data=dev_seq,\n epochs=EPOCH,\n callbacks=[\n keras.callbacks.EarlyStopping(\n monitor='val_sparse_categorical_accuracy',\n restore_best_weights=True,\n patience=3,),\n ],\n)\n\nmodel.save_weights(MODEL_NAME)\n\n# Evaluation\nresults = model.predict_generator(test_seq, verbose=True).argmax(axis=-1)\ntp, fp, fn, tn = 0, 0, 0, 0\nfor i in range(len(results)):\n if results[i] == 1:\n if test_seq.y[i] == 1:\n tp += 1\n else:\n fp += 1\n else:\n if test_seq.y[i] == 1:\n fn += 1\n else:\n tn += 1\n\nprint('Confusion:')\nprint('[{}, {}]'.format(tp, fp))\nprint('[{}, {}]'.format(fn, tn))\n\nprint('Accuracy: %.4f' % ((tp + tn) / (tp + fp + fn + tn)))\nprint('Precision: %.2f' % (100.0 * tp / (tp + fp + 1e-8)))\nprint('Recall: %.2f' % (100.0 * tp / (tp + fn + 1e-8)))\nprint('F1-Score: %.2f' % (100.0 * (2.0 * tp) / (2.0 * tp + fp + fn)))\n","repo_name":"CyberZHG/keras-xlnet","sub_path":"demo/ChnSentiCorp/csc.py","file_name":"csc.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"23"} +{"seq_id":"72552275900","text":"import re\nimport string\n\n\ndef 
post_process_token_list(token_list):\n \"\"\"\n Post process a token list after tokenization\n Parameter : token_list [str]\n return processed token list\n \"\"\"\n out = []\n for token in token_list:\n if is_punc(token):\n continue\n elif is_number(token):\n out.append(\"xxnum\")\n else:\n out.append(token.strip().lower())\n return out\n\n\n\ndef is_punc(text: str) -> bool:\n \"\"\"check whether the text is a punctuation\"\"\"\n return all(map(lambda ch: (ch in string.punctuation+ \" \"), text))\n\n\ndef is_number(text: str) -> bool:\n \"\"\"check whether the text is a number\"\"\"\n\n return all(\n map(\n lambda ch: (ch in \"0123456789๐๑๒๓๔๕๖๗๘๙\"),\n filter(lambda ch: not (ch in string.punctuation), text)\n )\n )\n\n","repo_name":"boomza654/temp","sub_path":"token_post_process.py","file_name":"token_post_process.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"47469190238","text":"import random\nprint('Estou pensando em um número, advinhe qual é!')\nnum = int(input('Digite um número entre 0 e 5: '))\nnumPc = int(random.randrange(0, 5))\n\nif num > 5:\n print('Epa! Somente números entre 0 e 5 são válidos! Tente denovo')\nif num == numPc:\n print('Quem diria você acertou! Estava justamente pensando em {}'.format(numPc))\nelse:\n print('Mais sorte na próxima! Estava pensanado em {}, e você chutou {}'.format(numPc, num))","repo_name":"CaioVRS21/estudos-python","sub_path":"AulasCursoEmVideo/desafios/desafio028.py","file_name":"desafio028.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"74710152699","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom irods.models import DataObject, DataObjectMeta, Collection, Resource\nfrom irods.column import Criterion, Like\nfrom irods.session import iRODSSession\nimport os, sys\nfrom getopt import getopt,GetoptError\n\ntry:\n opt, args = getopt(sys.argv[1:],'R:n:e:')\nexcept GetoptError as e:\n print (\"\"\"usage: %s [ -e .ext ] [ -n filestub ]\n -e defaults to \".jpg\"\n -n defaults to \"stickers\" \"\"\" % (sys.argv[0],)) \n sys.exit(1)\n\nopts = {} ; opts.update (opt)\n\ntry:\n env_file = os.environ['IRODS_ENVIRONMENT_FILE']\nexcept KeyError:\n env_file = os.path.expanduser('~/.irods/irods_environment.json')\n\nsession = iRODSSession(irods_env_file=env_file) \n\nobject_name_stub = opts.get('-n', \"stickers\")\nobject_name_ext = opts.get('-e','.jpg')\nresc_name = opts.get ('-R', \"lts_resc\" )\n\nif not resc_name : resc_name = '%'\n \nq = session.query ( Collection.name , DataObject.name, Resource.name )\nq.filter( Like (DataObject.name, object_name_stub + '%x%' + object_name_ext ),\n Like (Resource.name, resc_name) )\n\nresultsIter = q.get_results()\n\nprint (\"=== QUERY RESULTS: ===\")\nfor result in resultsIter :\n print( result[Resource.name] + \" :\\t\\t\" +\\\n result[Collection.name] + \"/\" + result[DataObject.name] )\n\n\n\n","repo_name":"irods/irods_training","sub_path":"advanced/hpc_data_to_compute/detect_thumbnails.py","file_name":"detect_thumbnails.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"23"} +{"seq_id":"28676565098","text":"import os\nimport torch.utils.data as data\nimport torch\nfrom tqdm import tqdm\nfrom glob import iglob\nimport pickle as pkl\nimport random\nimport numpy as np\nfrom sklearn import 
preprocessing\n\n\nclass ContinuousTestsetByUtt(data.Dataset):\n '''\n '''\n def __init__(self, feature_dir, pairs_path, spkr2gender, hop=10, window_size=32, normalize=False, std=False, torch=True, embedding_size=64):\n\n self.feature_dir = feature_dir\n self.pairs_path = pairs_path\n self.spkr2gender = spkr2gender\n self.test_pairs = self.get_test_pair(self.pairs_path)\n self.hop = hop\n self.window_size = window_size\n self.normalize = normalize\n self.std = std\n self.torch = torch\n self.embedding_size = embedding_size\n\n def __getitem__(self, index):\n '''\n\n Args:\n index: Index of the triplet or the matches - not of a single features\n\n Returns:\n\n '''\n\n path_1, path_2, label, gender_label, spkr_1, spkr_2 = self.test_pairs[index]\n\n try:\n with open(path_1, \"rb\") as f:\n utt_1 = pkl.load(f)\n except:\n with open(path_1.replace(\"vad_mfcc\", \"mfcc\"), \"rb\") as f:\n utt_1 = pkl.load(f)\n try:\n with open(path_2, \"rb\") as f:\n utt_2 = pkl.load(f)\n except:\n with open(path_2.replace(\"vad_mfcc\", \"mfcc\"), \"rb\") as f:\n utt_2 = pkl.load(f)\n\n if self.torch:\n img_1 = torch.FloatTensor(preprocessing.scale(utt_1, with_mean=self.normalize, with_std=self.std, axis=1).T)\n img_2 = torch.FloatTensor(preprocessing.scale(utt_2, with_mean=self.normalize, with_std=self.std, axis=1).T)\n\n\n # img_1 = self.get_segment(utt_1)\n # img_2 = self.get_segment(utt_2)\n # if self.torch:\n # img_1 = torch.FloatTensor(img_1.transpose((0, 2, 1)))\n # img_2 = torch.FloatTensor(img_2.transpose((0, 2, 1)))\n\n return [img_1, img_2, label, gender_label, spkr_1, spkr_2]\n\n def __len__(self):\n return len(self.test_pairs)\n\n def get_segment(self, features):\n network_inputs = []\n total_frames = len(features)\n\n features = preprocessing.scale(features, with_mean=self.normalize, with_std=self.std)\n if not self.torch:\n return features.astype(np.float64)\n end = total_frames - self.window_size\n\n for i in range(0, total_frames, self.hop):\n if i > end:\n break\n frames_slice = features[i:i + self.window_size]\n network_inputs.append(frames_slice)\n\n # for _ in range(self.augment):\n # network_inputs += network_inputs\n\n return np.array(network_inputs)\n\n\n def get_test_pair(self, pairs_path):\n data = [line.strip().split(' ') for line in open(pairs_path, 'r').readlines()]\n\n test_pairs = []\n\n for line in tqdm(data):\n if line[0] == '1':\n label = True\n else:\n label = False\n\n path_1 = f\"{self.feature_dir}/{line[1]}\"\n path_2 = f\"{self.feature_dir}/{line[2]}\"\n spkr_1 = line[1].split('/')[0]\n spkr_2 = line[2].split('/')[0]\n gender_1 = self.spkr2gender[spkr_1]\n gender_2 = self.spkr2gender[spkr_2]\n if gender_1 == gender_2:\n gender_label = '1'\n else:\n gender_label = '0'\n test_pairs.append((path_1, path_2, label, gender_label, spkr_1, spkr_2))\n\n return test_pairs\n\n @staticmethod\n def custom_collate_fn(batch):\n\n segment_length_1 = []\n segment_length_2 = []\n label = []\n gender = []\n for pair in batch:\n segment_length_1.append(len(pair[0]))\n segment_length_2.append(len(pair[1]))\n label.append(pair[2])\n gender.append(pair[3])\n img_1, img_2, _, _ = zip(*batch)\n\n return torch.cat(img_1), torch.cat(img_2), label, gender, segment_length_1, segment_length_2\n\nif __name__ == \"__main__\":\n\n feature_dir = \"/mnt/E/arthur.wang/aishell/aishell1/utt/logfbank/test\"\n pairs_path = \"/mnt/E/arthur.wang/aishell/aishell1/aishell1_test_list.txt\"\n test_set = ContinuosTestsetBySpeaker(feature_dir, pairs_path)\n test_loader = torch.utils.data.DataLoader(test_set, 
batch_size=10, shuffle=False, collate_fn=test_set.custom_collate_fn)\n for data_a, data_b, label, length_a, length_b in test_loader:\n print(type(data_a))\n exit()\n data_a = torch.unsqueeze(data_a, 1)\n start_a = 0\n start_b = 0\n embedding_a = []\n for segment_a, segment_b in zip(length_a, length_b):\n temp = data_a[start_a:segment_a]\n temp = torch.mean(temp, 0)\n embedding_a.append(temp)\n print(len(embedding_a))\n embedding_a = torch.cat(embedding_a, 0)\n print(embedding_a.shape)\n exit()\n # feature_1, feature_2, label = test_set[0]\n # print(feature_1.shape)","repo_name":"ntumirlab/Speaker-Verification","sub_path":"TestSet/ContinuousTestsetByUtt.py","file_name":"ContinuousTestsetByUtt.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"25495637494","text":"from pathlib import Path\n\nfrom orquestra.sdk._base import _config\nfrom orquestra.sdk.schema import configs\n\n\ndef write_user_config_file(\n dirpath: Path,\n runtime_config: configs.RuntimeConfiguration,\n):\n config_file = dirpath / _config.CONFIG_FILE_NAME\n config_file_contents = configs.RuntimeConfigurationFile(\n version=_config.CONFIG_FILE_CURRENT_VERSION,\n configs={runtime_config.config_name: runtime_config},\n )\n config_file.write_text(config_file_contents.json())\n","repo_name":"zapatacomputing/orquestra-workflow-sdk","sub_path":"tests/sdk/project_state.py","file_name":"project_state.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"23"} +{"seq_id":"5489128423","text":"import unittest\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nfrom django.test import TestCase\nfrom django.shortcuts import reverse\nfrom django.template import Context, Template\nfrom paystack.utils import MockRequest\n\n\nclass PaystackTestCase(TestCase):\n def get_mock(self, mock_call, args):\n mock_instance = mock_call.return_value\n mock_instance.verify_payment.return_value = args\n return mock_instance\n\n @patch('paystack.utils.PaystackAPI')\n def test_when_successful_redirects_to_default_success_url_when_not_set(\n self, mock_call):\n mock_instance = self.get_mock(mock_call,\n (True, \"verification successful\"))\n response = self.client.get(\"{}?amount=30000&trxref=biola23\".format(\n reverse('paystack:verify_payment', args=['1234'])))\n mock_instance.verify_payment.assert_called_once_with(\n \"biola23\",\n amount=30000,\n )\n self.assertEqual(response.url,\n reverse(\n 'paystack:successful_verification',\n args=['1234']))\n\n response = self.client.get(response.url)\n self.assertEqual(response.url, reverse('paystack:success_page'))\n\n @patch('paystack.utils.PaystackAPI')\n def test_when_fails_redirects_to_default_fail_url_when_not_set(\n self, mock_call):\n mock_instance = self.get_mock(mock_call, (False, \"failed transaction\"))\n response = self.client.get(\"{}?amount=30000&trxref=biola23\".format(\n reverse('paystack:verify_payment', args=['1234'])))\n mock_instance.verify_payment.assert_called_once_with(\n \"biola23\", amount=30000)\n self.assertEqual(response.url,\n reverse(\n 'paystack:failed_verification', args=['1234']))\n response = self.client.get(response.url)\n self.assertEqual(response.url, reverse('paystack:failed_page'))\n\n def test_template_tag_renders_correctly(self):\n template_val = Template(\"\"\"\n {% load paystack %}\n {% paystack_button button_class=\"red\" amount=3000 
email=\"gboze2@example.com\" %}\n \"\"\")\n context = Context({})\n template_response = template_val.render(context)\n self.assertIn('django-paystack-button', template_response)\n self.assertIn('gboze2@example.com', template_response)\n self.assertIn('300000', template_response)\n\n def mock_request(self, data, status_code=200):\n return MockRequest(data, status_code=status_code)\n\n\nclass NewTestCase(unittest.TestCase):\n def setUp(self):\n from paystack.utils import load_lib\n self.instance = load_lib('django_paystack.mock_implement')()\n\n @patch('requests.get')\n def test_can_load_external_module(self, mock_post):\n mock_post.return_value = MockRequest(\n {\n \"status\": False,\n \"message\": \"Invalid key\"\n }, status_code=400)\n response = self.instance.verify_payment(\"12345\", amount=27000)\n\n self.assertEqual(response, \"hello\")\n","repo_name":"gbozee/pypaystack","sub_path":"examples/django_paystack/django_paystack/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"23"} +{"seq_id":"2199018843","text":"#qagDate.py v1.3 20151108\r\n#qagDate.py v1.4 20151118\r\n\r\n\r\nimport datetime\r\n\r\n\r\nclass qagDateUtil():\r\n\r\n\r\n @classmethod\r\n def IsValidDateString(self, in_date, in_strformat=\"%Y%m%d\"):\r\n\r\n try:\r\n ff=self.ConvertDateFormat(in_date,\"String\",\"Ordinal\",in_strformat)\r\n return True\r\n except:\r\n return False\r\n\r\n\r\n @classmethod\r\n def ConvertDateToString(self,in_date):\r\n if type(in_date) is str:\r\n return in_date\r\n elif type(in_date) is int:\r\n return datetime.date.fromordinal(in_date).strftime('%Y%m%d')\r\n else:\r\n return \"Error: Date must be YYYYMMDD string or Ordinal in ConvertDateToString\" \r\n\r\n\r\n @classmethod\r\n def ConvertDateToOrdinal(self, in_date):\r\n if type(in_date) is str:\r\n thedate = datetime.datetime.strptime(in_date, \"%Y%m%d\")\r\n return thedate.toordinal()\r\n elif type(in_date) is int:\r\n return in_date\r\n else:\r\n return \"Error: Date must be YYYYMMDD string or Ordinal in ConvertDateToOrdinal\" \r\n\r\n\r\n def excel_date(self,date1): #DateTime to Excel date (serial)\r\n temp = datetime.datetime(1899, 12, 31)\r\n delta = date1 - temp\r\n return float(delta.days) + (float(delta.seconds) / 86400) +1.0\r\n\r\n\r\n def float2datetime(self,serial):\r\n seconds = (serial - 25569) * 86400.0\r\n return datetime.datetime.utcfromtimestamp(seconds)\r\n\r\n\r\n def numpyfloat2datetime(self,numpyserial):\r\n return self.float2datetime(numpyserial-693594.0)\r\n\r\n\r\n\r\n\r\n @classmethod\r\n def ConvertDateFormat(self, in_date, in_type, out_type, in_strformat=\"%Y%m%d\", out_strformat=\"%Y%m%d\"):\r\n #ordinal: 1 January 0001 has ordinal value of 1\r\n if in_type is 'String':\r\n thedate = datetime.datetime.strptime(in_date, in_strformat) #convert string to datetime\r\n ordval = thedate.toordinal() #convert datetime to ordinal\r\n elif in_type is 'Ordinal':\r\n ordval = in_date\r\n elif in_type is 'DateTime':\r\n ordval = in_date.toordinal() \r\n elif in_type is 'Javascript':\r\n ordval = in_date / (24.0 * 3600000.0) + 719163.0 \r\n else:\r\n return \"Error: invalid in_type in ConvertDateFormat\"\r\n\r\n if out_type is 'String':\r\n outdate = datetime.date.fromordinal(ordval).strftime(out_strformat) \r\n elif out_type is 'Ordinal':\r\n outdate = ordval\r\n elif out_type is 'Excel':\r\n ordDateTime = datetime.datetime.fromordinal(ordval)\r\n temp = datetime.datetime(1899, 12, 31)\r\n delta = 
ordDateTime - temp\r\n outdate = float(delta.days) + (float(delta.seconds) / 86400) +1.0\r\n elif out_type is 'DateTime':\r\n outdate = datetime.datetime.fromordinal(ordval)\r\n elif out_type is 'Javascript':\r\n outdate = (ordval - 719163.0) * 24 * 3600000 \r\n else:\r\n return \"Error: invalid out_type in ConvertDateFormat\"\r\n\r\n return outdate\r\n\r\n\r\n @classmethod\r\n def ConvertDateFormatShifter(self, in_date, in_type, out_type, in_strformat=\"%Y%m%d\", out_strformat=\"%Y%m%d\", shiftNum=0, shiftUnit=\"d\"):\r\n #ordinal: 1 January 0001 has ordinal value of 1\r\n if in_type is 'String':\r\n thedate = datetime.datetime.strptime(in_date, in_strformat) #convert string to datetime\r\n ordval = thedate.toordinal() #convert datetime to ordinal\r\n elif in_type is 'Ordinal':\r\n ordval = in_date\r\n elif in_type is 'DateTime':\r\n ordval = in_date.toordinal() \r\n else:\r\n return \"Error: invalid in_type in ConvertDateFormat\"\r\n\r\n if shiftNum is not 0:\r\n if shiftUnit is \"d\": \r\n ordval = ordval + shiftNum\r\n elif shiftUnit is \"w\": \r\n ordval = ordval + shiftNum*7\r\n \r\n if out_type is 'String':\r\n outdate = datetime.date.fromordinal(ordval).strftime(out_strformat) \r\n elif out_type is 'Ordinal':\r\n outdate = ordval\r\n elif out_type is 'Excel':\r\n ordDateTime = datetime.datetime.fromordinal(ordval)\r\n temp = datetime.datetime(1899, 12, 31)\r\n delta = ordDateTime - temp\r\n outdate = float(delta.days) + (float(delta.seconds) / 86400) +1.0\r\n elif out_type is 'DateTime':\r\n outdate = datetime.datetime.fromordinal(ordval)\r\n elif out_type is 'Javascript':\r\n outdate = (ordval - 719163.0) * 24 * 3600000 \r\n else:\r\n return \"Error: invalid out_type in ConvertDateFormat\"\r\n\r\n return outdate\r\n\r\n \r\n\r\n\r\n \r\n @classmethod\r\n def ConvertDateFormatList(self, in_dateList, in_type, out_type, in_strformat=\"%Y%m%d\", out_strformat=\"%Y%m%d\"):\r\n n = len(in_dateList)\r\n return map(self.ConvertDateFormat,in_dateList,[in_type]*n, [out_type]*n, [in_strformat]*n, [out_strformat]*n)\r\n\r\n\r\n\r\n\r\n","repo_name":"gitrepodg1/NYCtaxi","sub_path":"qagDate.py","file_name":"qagDate.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"27667166813","text":"from time import perf_counter\nimport re\nimport bs4 \nimport requests\n\n\ndef scrape_gisd(ln_names_dict):\n token = re.compile(\"//li//a//small\")\n gisd_list = []\n counter = 0\n try:\n for i in ln_names_dict.keys():\n print(counter)\n counter += 1\n soup2 = \"\"\n url = \"http://www.iucngisd.org/gisd/speciesname/\" + ln_names_dict[i][0]\n page = requests.get(url, timeout=15)\n soup = str(bs4.BeautifulSoup(page.text, 'html.parser'))\n soup2 += ln_names_dict[i][0] + \"\\n\"\n soup2 += soup\n gisd_list.append(soup2)\n # write_soup(soup2)\n except TimeoutError:\n pass\n return(gisd_list)\n\n# def write_soup(soup2): # Temp file, don't actually use this.\n# print(len(soup2))\n# print(type(soup2))\n# with open('D:\\\\Project_IAS\\\\Scraped\\\\Scraped_RA\\\\Scraped_GISD_soup.txt', 'w', encoding=\"utf-8\") as f:\n# print(\"here\")\n# f.write(soup2)\n# f.close()\n\ndef get_gisd_spec(ln_names_dict, gisd_list):\n url = \"http://www.iucngisd.org/gisd/speciesname/\"\n impact_token = re.compile(\"javascript:;.{2}(?:Not Evaluated|No Alien Population|Data Deficient|Minimal Concern|Minor|Moderate|Major|Massive)\")\n impact_short = \">.*\"\n absent_token = re.compile(\"is not present yet in our archive.\")\n counter = 0\n\n 
for count, value in enumerate(ln_names_dict.keys()):\n matches = []\n absent = []\n matches = re.findall(impact_token, gisd_list[count])\n matches = list(set(matches))\n absent = re.findall(absent_token, gisd_list[count])\n absent = list(set(absent))\n if len(matches) > 0:\n spec_match = re.findall(impact_short, matches[0])\n name = ln_names_dict[value][0]\n name = name.replace(\" \", \"+\")\n spec_url = url + name\n quote = \"Species EICAT classification: \" + spec_match[0] + \" on: \" + spec_url\n ln_names_dict[value].append(quote)\n counter += 1\n elif len(absent) > 0:\n pass\n else:\n counter += 1\n name = ln_names_dict[value][0]\n name = name.replace(\" \", \"+\")\n spec_url = url + name\n quote = \"Species general info available on: \" + spec_url\n ln_names_dict[value].append(quote)\n\n return ln_names_dict\n\ndef main_scraper(ln_names_dict):\n gisd_list = scrape_gisd(ln_names_dict)\n ln_scraping_dict = get_gisd_spec(ln_names_dict, gisd_list)\n return ln_scraping_dict\n\nif __name__ == \"__main__\":\n \"\"\"\n \n \"\"\"\n t1_start = perf_counter() \n print( \"-\" * 80, \"\\n\", \"Controller start\", \"\\n\", \"-\" * 80)\n ias_file = \"D:\\\\Project_IAS\\\\ProjectCode\\\\ias_names_big_unedited\"\n ln_names_dict = {}\n\n try: \n import RA_scraping_suite\n except ModuleNotFoundError:\n from RA_Code import RA_scraping_suite\n\n ln_names_dict = RA_scraping_suite.read_file(ias_file)\n ln_scraping_dict = main_scraper(ln_names_dict)\n\n print(\"-\" * 80, \"\\n\", \"Controller end, script finished\", \"\\n\", \"-\" * 80)\n t1_stop = perf_counter()\n print(\"Elapsed time:\", t1_stop, t1_start) \n print(\"Elapsed time during the whole program in seconds:\",\n t1_stop-t1_start)","repo_name":"RutgerJTK/IAS-Internship-ProjectCode","sub_path":"RA_Code/ScrapeGISD.py","file_name":"ScrapeGISD.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"69916693179","text":"import argparse\n\nfrom Step2_cds2aa_class import FastaIO\nfrom multiprocessing import Pool\n\n\nclass FastaIOParallel(FastaIO):\n def __init__(self, fasta_file_name):\n super().__init__(fasta_file_name)\n\n def print_pep_parallel(self, threads=2):\n \"\"\"翻译cds dict生成氨基酸序列\"\"\"\n self.print = \"\"\n with Pool(threads) as p:\n result = p.map(self.get_pep_fasta_by_name, self.seq.keys())\n print(\"\".join(result), end=\"\")\n\n\nif __name__ == \"__main__\":\n # my_fasta = FastaIO(\"/Users/liuhui/PycharmProjects/python_learn/Lesson3/homework_KEYI/Homework.fasta\")\n # #https://www.slideshare.net/tisto/argparse-python-command-line-parser\n parser = argparse.ArgumentParser(\n description=\"Translate cds to pep\"\n )\n #parser.print_help()\n # add positional arguments\n parser.add_argument('input', help=\"Input file of cds\")\n # reads from sys.argv and extract args\n args = parser.parse_args()\n\n input_file_name = args.input\n my_fasta = FastaIOParallel(input_file_name)\n my_fasta.print_pep_parallel()\n","repo_name":"lhui2010/python_learn","sub_path":"Lesson5/Step5_cds2aa_parallel.py","file_name":"Step5_cds2aa_parallel.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"38261285352","text":"import unittest\nfrom typing import List\n\nimport util.common as utility\n\n\nclass TestUtility(unittest.TestCase):\n '''utility.py'''\n\n def test_walk_spine(self) -> None:\n '''it works'''\n tree = {'a': {'b': {'c': 1}}}\n 
self.assertEqual(utility.walk_spine(tree, ['a']), {'b': {'c': 1}})\n self.assertEqual(utility.walk_spine(tree, ['a', 'b']), {'c': 1})\n self.assertEqual(utility.walk_spine(tree, ['a', 'b', 'c']), 1)\n\n def test_prefix_tuples(self) -> None:\n '''it works'''\n out = utility.prefix_tuples(1, [(2, 3), (3, 4), (4, 5)])\n out = list(out)\n self.assertEqual(out, [(1, 2, 3), (1, 3, 4), (1, 4, 5)])\n\n def test_pretty_date(self) -> None:\n pairs = [\n ('2023-01-01', 'January 1st, 2023'),\n ('2023-01-11', 'January 11th, 2023'),\n ('2023-01-12', 'January 12th, 2023'),\n ('2023-01-13', 'January 13th, 2023'),\n ('2023-10-02', 'October 2nd, 2023'),\n ('2023-10-22', 'October 22nd, 2023'),\n ('2023-12-03', 'December 3rd, 2023'),\n ('2023-12-23', 'December 23rd, 2023'),\n ('2023-04-04', 'April 4th, 2023'),\n ('2023-04-24', 'April 24th, 2023'),\n ]\n for before, after in pairs:\n self.assertEqual(utility.pretty_date(before), after)\n\n def test_take(self) -> None:\n '''it works'''\n xs = [1, 2, 3, 4, 5]\n n = 3\n expected = [1, 2, 3]\n self.assertEqual(utility.take(xs, n), expected)\n\n xs = [1, 2, 3]\n n = 5\n expected = [1, 2, 3]\n self.assertEqual(utility.take(xs, n), expected)\n\n xs = [1, 2, 3, 4, 5]\n n = 0\n expected: List[int] = []\n self.assertEqual(utility.take(xs, n), expected)\n\n xs = range(100000000)\n n = 5\n expected = [0, 1, 2, 3, 4]\n self.assertEqual(utility.take(xs, n), expected)\n\n def test_flatten(self) -> None:\n '''it works'''\n self.assertEqual([0, 1, 2], utility.flatten([[0], [1, 2]]))\n self.assertEqual([], utility.flatten([[]]))\n\n def test_tree_size(self) -> None:\n '''Test the tree_size function with various inputs.'''\n # Test a tree with no leaves\n tree = {'a': {'b': {}, 'c': {}}, 'd': {}}\n self.assertEqual(utility.tree_size(tree), 0)\n\n # Test a tree with one leaf\n tree = {'a': {'b': {'c': [1]}}}\n self.assertEqual(utility.tree_size(tree), 1)\n\n # Test a tree with multiple leaves\n tree = {'a': {'b': [3, 3]}, 'c': [4, 4, 4]}\n self.assertEqual(utility.tree_size(tree), 5)\n\n # Test a tree with nested leaves\n tree = {'a': {'b': {'c': {'d': [1]}}}}\n self.assertEqual(utility.tree_size(tree), 1)\n\n # Test a tree with multiple levels\n tree = {'a': {'b': {'c': {'d': [1, 2, 3]}}, 'e': {'f': [4]}}, 'g': [5]}\n self.assertEqual(utility.tree_size(tree), 5)\n\n def test_extract_leaves(self) -> None:\n '''grab the leaves for this tree'''\n tree = {'a': {'b': 3}, 'c': 4}\n leaves = list(utility.extract_leaves(tree))\n wanted = [3, 4]\n self.assertEqual(sorted(leaves), sorted(wanted))\n\n def test_extract_branches(self) -> None:\n '''grab the branches for this tree'''\n tree = {'a': {'b': 3}, 'c': 4}\n branches = list(utility.extract_branches(tree))\n wanted = ['a', 'b', 'c']\n self.assertEqual(sorted(branches), sorted(wanted))\n\n def test_hmap(self) -> None:\n '''fold-ish thing'''\n out = utility.hmap(0, lambda x: x + 1, lambda x: x * 5)\n self.assertEqual(out, 5)\n\n out = utility.hmap(0, lambda x: x + 1, lambda x: x * 5, lambda x: x - 1)\n self.assertEqual(out, 4)\n\n out = utility.hmap(\n 0, lambda x: x + 1, lambda x: x * 5, lambda x: x - 1, lambda x: x / 2\n )\n self.assertEqual(out, 2.0)\n\n def test_is_date(self) -> None:\n '''it works'''\n for positive in ('2017-01-01', '2000-11-21'):\n self.assertTrue(utility.is_date(positive))\n\n for negative in ('2000-91-21', '2000-21', '2000-21'):\n self.assertFalse(utility.is_date(negative))\n\n def test_strip_date(self) -> None:\n '''it works'''\n examples = [\n ('Sund Rock 2017-01-01', 'Sund Rock'),\n ('Sund Rock 4', 'Sund 
Rock 4'),\n ('2017-01-01', '2017-01-01'),\n ]\n for before, after in examples:\n self.assertEqual(utility.strip_date(before), after)\n\n def test_file_content_matches(self) -> None:\n '''it works'''\n with open('LICENSE') as fd:\n license = fd.read()\n\n self.assertTrue(utility.file_content_matches('LICENSE', license))\n\n self.assertFalse(utility.file_content_matches('LICENSE', 'Hello there'))\n self.assertFalse(\n utility.file_content_matches('LICENSE', license.replace(' ', '!'))\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Gandalf-/diving","sub_path":"test/test_utility.py","file_name":"test_utility.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"31224035453","text":"import gym\nimport torch\nimport numpy as np\nfrom buffer import OnlineReplayBuffer\nfrom net import GaussPolicyMLP\nfrom critic import ValueLearner, QLearner\nfrom ppo import ProximalPolicyOptimization\nfrom utils import CONST_EPS, log_prob_func, orthogonal_initWeights\n\n\nclass BehaviorCloning:\n _device: torch.device\n _policy: GaussPolicyMLP\n _optimizer: torch.optim\n _policy_lr: float\n _batch_size: int\n def __init__(\n self,\n device: torch.device,\n state_dim: int,\n hidden_dim: int, \n depth: int,\n action_dim: int,\n policy_lr: float,\n batch_size: int\n ) -> None:\n super().__init__()\n self._device = device\n self._policy = GaussPolicyMLP(state_dim, hidden_dim, depth, action_dim).to(device)\n orthogonal_initWeights(self._policy)\n self._optimizer = torch.optim.Adam(\n self._policy.parameters(),\n lr = policy_lr\n )\n self._lr = policy_lr\n self._batch_size = batch_size\n \n\n def loss(\n self, replay_buffer: OnlineReplayBuffer,\n ) -> torch.Tensor:\n s, a, _, _, _, _, _, _ = replay_buffer.sample(self._batch_size)\n dist = self._policy(s)\n log_prob = log_prob_func(dist, a) \n loss = (-log_prob).mean()\n\n return loss\n\n\n def update(\n self, replay_buffer: OnlineReplayBuffer,\n ) -> float:\n policy_loss = self.loss(replay_buffer)\n\n self._optimizer.zero_grad()\n policy_loss.backward()\n self._optimizer.step()\n\n return policy_loss.item()\n\n\n def select_action(\n self, s: torch.Tensor, is_sample: bool\n ) -> torch.Tensor:\n dist = self._policy(s)\n if is_sample:\n action = dist.sample()\n else: \n action = dist.mean\n # clip \n action = action.clamp(-1., 1.)\n return action\n\n\n def offline_evaluate(\n self,\n env_name: str,\n seed: int,\n mean: np.ndarray,\n std: np.ndarray,\n eval_episodes: int=10\n ) -> float:\n env = gym.make(env_name)\n env.seed(seed)\n\n total_reward = 0\n for _ in range(eval_episodes):\n s, done = env.reset(), False\n while not done:\n s = torch.FloatTensor((np.array(s).reshape(1, -1) - mean) / std).to(self._device)\n a = self.select_action(s, is_sample=False).cpu().data.numpy().flatten()\n s, r, done, _ = env.step(a)\n total_reward += r\n \n avg_reward = total_reward / eval_episodes\n d4rl_score = env.get_normalized_score(avg_reward) * 100\n return d4rl_score\n \n\n def save(\n self, path: str\n ) -> None:\n torch.save(self._policy.state_dict(), path)\n print('Behavior policy parameters saved in {}'.format(path))\n \n\n def load(\n self, path: str\n ) -> None:\n self._policy.load_state_dict(torch.load(path, map_location=self._device))\n print('Behavior policy parameters loaded')\n\n\n\nclass BehaviorProximalPolicyOptimization(ProximalPolicyOptimization):\n\n def __init__(\n self,\n device: torch.device,\n state_dim: int,\n hidden_dim: int, \n depth: 
int,\n action_dim: int,\n policy_lr: float,\n clip_ratio: float,\n entropy_weight: float,\n decay: float,\n omega: float,\n batch_size: int\n ) -> None:\n super().__init__(\n device = device,\n state_dim = state_dim,\n hidden_dim = hidden_dim,\n depth = depth,\n action_dim = action_dim,\n policy_lr = policy_lr,\n clip_ratio = clip_ratio,\n entropy_weight = entropy_weight,\n decay = decay,\n omega = omega,\n batch_size = batch_size)\n\n\n def loss(\n self, \n replay_buffer: OnlineReplayBuffer,\n Q: QLearner,\n value: ValueLearner,\n is_clip_decay: bool,\n ) -> torch.Tensor:\n # -------------------------------------Advantage-------------------------------------\n s, _, _, _, _, _, _, _ = replay_buffer.sample(self._batch_size)\n old_dist = self._old_policy(s)\n a = old_dist.rsample()\n advantage = Q(s, a) - value(s)\n advantage = (advantage - advantage.mean()) / (advantage.std() + CONST_EPS)\n # -------------------------------------Advantage-------------------------------------\n new_dist = self._policy(s)\n\n new_log_prob = log_prob_func(new_dist, a)\n old_log_prob = log_prob_func(old_dist, a)\n ratio = (new_log_prob - old_log_prob).exp()\n \n advantage = self.weighted_advantage(advantage)\n\n loss1 = ratio * advantage \n\n if is_clip_decay:\n self._clip_ratio = self._clip_ratio * self._decay\n else:\n self._clip_ratio = self._clip_ratio\n\n loss2 = torch.clamp(ratio, 1 - self._clip_ratio, 1 + self._clip_ratio) * advantage \n \n entropy_loss = new_dist.entropy().sum(-1, keepdim=True) * self._entropy_weight\n \n loss = -(torch.min(loss1, loss2) + entropy_loss).mean()\n\n return loss\n\n\n def offline_evaluate(\n self,\n env_name: str,\n seed: int,\n mean: np.ndarray,\n std: np.ndarray,\n eval_episodes: int=10\n ) -> float:\n env = gym.make(env_name)\n avg_reward = self.evaluate(env_name, seed, mean, std, eval_episodes)\n d4rl_score = env.get_normalized_score(avg_reward) * 100\n return d4rl_score\n","repo_name":"Dragon-Zhuang/BPPO","sub_path":"bppo.py","file_name":"bppo.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"23"} +{"seq_id":"35343307773","text":"import streamlit as st\r\nfrom PIL import Image\r\n\r\nst.set_page_config(\r\n page_title='Home',\r\n page_icon= '🍲🧡'\r\n)\r\n\r\n# img_path = 'C:/Users/felipe.souza/repos/FTC/dataset/images/'\r\nimage = Image.open('img1.png' )\r\nst.sidebar.image(image, width=120)\r\n\r\nst.sidebar.markdown('# Cury Company') # A quantidade de # determina o tamanho da fonte\r\nst.sidebar.markdown('## Fastest Delivery in Town')\r\nst.sidebar.markdown(\"\"\"___\"\"\") # Cria uma linha\r\n\r\nst.write(\"# Curry Company Growth Dashboard\")\r\n\r\nst.markdown(\r\n \"\"\"\r\n Growth Dashboard foi construído para acompanhar as métricas de crescimento dos entregadores e restaurantes.\r\n ### Como utilizar o Growth Dashboard?\r\n - Visão Empresa:\r\n - Visão Gerencial: Métricas gerais de comportamento.\r\n - Visão Tática: Indicadores semanais de crescimento.\r\n - Visão Geográfica: Insights de geolocalização.\r\n - Visão Entregador:\r\n - Acompanhamento dos indicadores semanais de crescimento.\r\n Visão Restaurante:\r\n - indicadores semanais de crescimento dos restaurantes.\r\n ### Ask for Help\r\n - Call me on discord: Felipe Arruda#9488\r\n 
\"\"\"\r\n)\r\n\r\n","repo_name":"Felipe-Arruda/curry_company","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"39264727901","text":"encoding:utf8\r\n# class A:\r\n# \tdef __init__(self):\r\n# \t\tprint 'enter A'\r\n# \t\tprint 'leave A'\r\n# class B(A):\r\n# \tdef __init__(self):\r\n# \t\tprint 'enter B' #打印enter B\r\n# \t\tA.__init__(self) #打印enter A,打印leave A,使用非绑定的类方法(用类名来引用的方法),并在参数列表中,引入待绑定的对象(self),从而达到调用父类的目的\r\n# \t\tprint 'leave B' #打印leave B\r\n# B()\r\n\r\n'''\r\n这样做的缺点是,当一个子类的父类发生变化时(如类B的父类由A变为C时),必须遍历整个类定义,\r\n把所有的通过非绑定的方法的类名全部替换过来\r\n\r\n'''\r\nclass MyStrategy(strategy.BacktestingStrategy):\r\n def __init__(self, feed, instrument, smaPeriod):\r\n super(MyStrategy, self).__init__(feed, 1000) #super的意思是去找strategy这个类的父类,调用里面的方法\r\n self.__position = None\r\n self.__instrument = instrument\r\n # We'll use adjusted close values instead of regular close values.\r\n self.setUseAdjustedValues(True)\r\n self.__sma = ma.SMA(feed[instrument].getPriceDataSeries(), smaPeriod)\r\n\r\n def onEnterOk(self, position):\r\n execInfo = position.getEntryOrder().getExecutionInfo()\r\n self.info(\"BUY at $%.2f\" % (execInfo.getPrice()))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"mrwtx1618/py_all","sub_path":"super函数 (2).py","file_name":"super函数 (2).py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"70367489338","text":"import os\nimport glob\nimport sys\n\n'''this script is for find all the remains list and split them into evenly seperately list and process simultaneously'''\nsplitnum = 4#change this when u want to split into more parts\n\ncurrent_working_path = os.path.dirname(os.path.abspath(__file__))\n\nfor directory in os.listdir(os.path.join(current_working_path, \"Libs\")):\n sys.path.insert(0, os.path.join(os.path.join(current_working_path, \"Libs\"), directory))\n\nimport inputParameters\n#the readin path which contains all the csv input\nos.chdir(inputParameters.read_directory)\n#print(inputParameters.read_directory)\nextension = 'csv'\ninputcsvs = [i for i in glob.glob('*.{}'.format(extension))]\ninputname_set = set([name.split('.csv')[0] for name in inputcsvs])\nprint('input csv total num: ',len(inputname_set))\n\n#the storage path wich contains all the csv output that have the same name as the input part\nos.chdir(inputParameters.store_directory)\n#print(inputParameters.store_directory)\nextension = 'csv'\noutputcsvs = [i for i in glob.glob('*.{}'.format(extension))]\noutputname_set = set([name.split('.csv')[0] for name in outputcsvs])\nprint('out excel total num: ',len(outputname_set))\n\n#get the remains set\nremains_set = inputname_set.difference(outputname_set)\nprint('remains csv length: ',len(remains_set))\nremains_csv_list = [name+'.csv' for name in list(remains_set)]\nprint(remains_csv_list)\n\n\n#for stable running I use pickle to store it and read back in the run_01\n#Pickling\nimport pandas as pd\ndf = pd.DataFrame({'remainlist':remains_csv_list})\ndf.to_csv(os.path.join(current_working_path, 
\"remainlist.csv\"))\n","repo_name":"chuqiaoshen/twitter_sentimentClassification_pipeline","sub_path":"findRemains.py","file_name":"findRemains.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"4172766075","text":"# -*- coding: utf-8 -*-\n\n# 系统模块\nimport os\nimport ast\nimport inspect\nfrom functools import partial\n\n# 自定义模块\nfrom Bigfish.utils.log import FilePrinter\nfrom Bigfish.store.directory import UserDirectory\nfrom Bigfish.utils.export import export, SeriesFunction\nfrom Bigfish.event.handle import SymbolsListener\nfrom Bigfish.utils.ast import LocalsInjector, SeriesExporter, SystemFunctionsDetector\nfrom Bigfish.utils.common import check_time_frame\nfrom Bigfish.models.common import HasID\n\n\n########################################################################\ndef set_parameters(paras):\n pass\n\n\nclass Strategy(HasID):\n ATTR_MAP = dict(timeframe=\"time_frame\", base=\"capital_base\", symbols=\"symbols\", start=\"start_time\", end=\"end_time\",\n maxlen=\"max_length\")\n\n # ----------------------------------------------------------------------\n def __init__(self, engine, user, name, code, symbols=None, time_frame=None, start_time=None, end_time=None):\n \"\"\"Constructor\"\"\"\n self.__id = self.next_auto_inc()\n self.user = user\n self.user_dir = UserDirectory(user)\n self.name = name\n self.code = code\n self.engine = engine\n self.time_frame = time_frame\n self.symbols = symbols\n self.start_time = start_time\n self.end_time = end_time\n self.max_length = 0\n self.capital_base = 100000\n self.handlers = {}\n self.listeners = {}\n self.system_functions = {}\n self.series_storage = {}\n self.__printer = FilePrinter(user, name, self.engine)\n self.__context = {}\n # 是否完成了初始化\n self.trading = False\n # 在字典中保存Open,High,Low,Close,Volumn,CurrentBar,MarketPosition,\n # 手动为exec语句提供local命名空间\n self.__locals_ = dict(sell=partial(self.engine.sell, strategy=self.__id),\n short=partial(self.engine.short, strategy=self.__id),\n buy=partial(self.engine.buy, strategy=self.__id),\n cover=partial(self.engine.cover, strategy=self.__id),\n marketposition=self.engine.get_current_positions(),\n currentcontracts=self.engine.get_current_contracts(), data=self.engine.get_data,\n context=self.__context, export=partial(export, strategy=self),\n put=self.put_context, get=self.get_context, print=self.__printer.print,\n listeners=self.listeners, system_functions=self.system_functions,\n )\n # 将策略容器与对应代码文件关联\n self.bind_code_to_strategy(self.code)\n\n # -----------------------------------------------------------------------\n def get_output(self):\n with open(self.__printer.get_path()) as f:\n content = f.read()\n f.close()\n return content\n\n # ----------------------------------------------------------------------\n def get_parameters(self):\n return {key: value.get_parameters() for key, value in self.listeners.items()}\n\n # ----------------------------------------------------------------------\n def set_parameters(self, parameters):\n for handle, paras in parameters.items():\n self.listeners[handle].set_parameters(**paras)\n\n # ----------------------------------------------------------------------\n def get_id(self):\n return self.__id\n\n # ----------------------------------------------------------------------\n def put_context(self, **kwargs):\n for key, value in kwargs.items():\n self.__context[key] = value\n\n # ----------------------------------------------------------------------\n 
def get_context(self, key):\n return self.__context[key]\n\n # ----------------------------------------------------------------------\n def initialize(self):\n self.__context.clear()\n self.series_storage.clear()\n\n # ----------------------------------------------------------------------\n # 将策略容器与策略代码关联\n def bind_code_to_strategy(self, code):\n def get_parameter_default(paras, name, check, default, pop=True):\n if pop:\n para = paras.pop(name, None)\n else:\n para = paras.get(name, None)\n if para:\n temp = para.default\n if temp == inspect._empty:\n # TODO未给定值的处理\n return default\n elif check(temp):\n return temp\n else:\n raise KeyError(\"变量%s所赋值不合法\", name)\n else:\n return default\n\n def get_global_attrs(locals_):\n for name, attr in self.ATTR_MAP.items():\n if getattr(self, attr) is None:\n setattr(self, attr, locals_.get(name))\n\n def set_global_attrs(globals_):\n for name, attr in self.ATTR_MAP.items():\n value = getattr(self, attr)\n if value is not None:\n globals_[name] = value\n else:\n raise ValueError('全局变量%s未被赋值' % name)\n\n signal_locals_ = {}\n function_locals = {}\n signal_globals_ = {} # 可动态管理的全策略命名空间\n function_globals_ = {} # 可动态管理的系统函数命名空间\n # 获取并检查一些全局变量的设定\n exec(compile(code, \"[Strategy:%s]\" % self.name, mode=\"exec\"), signal_globals_, signal_locals_)\n get_global_attrs(signal_locals_)\n set_global_attrs(signal_globals_)\n set_global_attrs(function_globals_)\n signal_globals_.update(self.__locals_)\n function_globals_.update(self.__locals_)\n self.engine.set_capital_base(self.capital_base)\n self.engine.start_time = self.start_time\n self.engine.end_time = self.end_time\n check_time_frame(self.time_frame)\n # get the system functions in use\n ast_ = ast.parse(code)\n sys_func_detector = SystemFunctionsDetector()\n sys_func_detector.visit(ast_)\n sys_func_dir = self.user_dir.get_sys_func_dir()\n funcs_in_use = sys_func_detector.get_funcs_in_use()\n\n # get the instructions to inject to every handle\n signal_instructions = {}\n function_instructions = {}\n code_lines = [\"import functools\", \"__globals = globals()\"]\n code_lines.extend([\"{0} = __globals['{0}']\".format(key) for key in self.__locals_.keys()\n if key not in [\"sell\", \"buy\", \"short\", \"cover\"]])\n for key, value in signal_locals_.items():\n if inspect.isfunction(value):\n if key == \"init\":\n self.handlers['init'] = value\n # TODO init中可以设定全局变量,所以要以\"global foo\"的方式进行注入,监听的事件不同所以要改写SymbolsListener\n continue\n paras = inspect.signature(value).parameters.copy()\n ishandle = get_parameter_default(paras, \"ishandle\", lambda x: isinstance(x, bool), True)\n if not ishandle:\n continue\n custom = get_parameter_default(paras, \"custom\", lambda x: isinstance(x, bool), False)\n if not custom:\n # TODO加入真正的验证方法\n symbols = get_parameter_default(paras, \"symbols\", lambda x: True, self.symbols)\n time_frame = get_parameter_default(paras, \"timeframe\", check_time_frame, self.time_frame)\n max_length = get_parameter_default(paras, \"maxlen\", lambda x: isinstance(x, int) and (x > 0),\n self.max_length)\n self.engine.add_symbols(symbols, time_frame, max_length)\n self.listeners[key] = SymbolsListener(self.engine, symbols, time_frame)\n additional_instructions = [\"{0} = system_functions['%s.%s'%('{1}','{0}')]\".format(f, key)\n for f in funcs_in_use.keys()] + ['del(system_functions)']\n temp = []\n # TODO 加入opens等,这里字典的嵌套结构\n temp.extend([\"%s = __globals['data']()['%s']['%s']['%s']\" % (field, symbols[0], time_frame, field)\n for field in [\"open\", \"high\", \"low\", \"close\", \"time\", 
\"volume\"]])\n temp.extend([\"{0} = __globals['listeners']['{1}'].{0}\".format('get_current_bar', key)])\n function_instructions[key] = code_lines + temp + [\"del(functools)\", \"del(__globals)\"] + \\\n additional_instructions\n temp.extend([\"{0} = functools.partial(__globals['{0}'],listener={1})\".format(\n field, self.listeners[key].get_id())\n for field in [\"buy\", \"short\", \"sell\", \"cover\"]])\n signal_instructions[key] = code_lines + temp + [\"del(functools)\", \"del(__globals)\"] + \\\n additional_instructions\n else:\n # TODO自定义事件处理\n pass\n for para_name in paras.keys():\n # TODO加入类型检测\n default = get_parameter_default(paras, para_name, lambda x: True, None,\n pop=False)\n if default is None:\n raise ValueError('参数%s未指定初始值' % para_name)\n elif not isinstance(default, (int, float)):\n raise ValueError('参数%s的值必须为整数或浮点数', para_name)\n self.listeners[key].add_parameters(para_name, default)\n series_exporter = SeriesExporter() # deal with the export syntax\n # export the system functions in use\n for func, signal in funcs_in_use.items():\n fullname = os.path.join(sys_func_dir, func + \".py\")\n with open(fullname) as f:\n func_ast = ast.parse(f.read())\n f.close()\n function_injector = LocalsInjector({func: function_instructions[signal]})\n function_injector.visit(func_ast)\n func_ast = series_exporter.visit(func_ast)\n # TODO 多个handle时需要对每个handle调用的系统函数建立独立的系统函数\n exec(compile(func_ast, \"[SysFunctions:%s]\" % func, mode=\"exec\"), function_globals_, function_locals)\n self.system_functions['%s.%s' % (signal, func)] = SeriesFunction(function_locals[func], signal)\n # new方法是对__init__的封装,创建SeriesFunction对象所需信息有其所在的函数体本身,signal和其运行时传入的参数,\n # 编译时所能确定的只有前者,采用偏函数的方式将两者结合到一起\n # inject global vars into locals of handler\n signal_injector = LocalsInjector(signal_instructions)\n signal_injector.visit(ast_)\n ast_ = series_exporter.visit(ast_)\n # TODO 解决行号的问题\n exec(compile(ast_, \"[Strategy:%s]\" % self.name, mode=\"exec\"), signal_globals_, signal_locals_)\n for key in signal_instructions.keys():\n self.listeners[key].set_generator(signal_locals_[key])\n print(\"<%s>信号添加成功\" % self.name)\n if 'init' in self.handlers:\n self.handlers['init']()\n return True\n\n # ----------------------------------------------------------------------\n def start(self):\n \"\"\"\n 启动交易\n 这里是最简单的改变self.trading\n 有需要可以重新实现更复杂的操作\n \"\"\"\n self.trading = True\n self.initialize()\n for listener in self.listeners.values():\n listener.start()\n for function in self.system_functions.values():\n function.start()\n self.__printer.start()\n print(self.name + u'开始运行')\n\n # ----------------------------------------------------------------------\n def stop(self):\n \"\"\"\n 停止交易\n 同上\n \"\"\"\n self.trading = False\n for listener in self.listeners.values():\n listener.stop()\n for function in self.system_functions.values():\n function.stop()\n self.__printer.stop()\n print(self.name + u'停止运行')\n","repo_name":"tiw-xh138/Bigfish","sub_path":"Bigfish/core/_strategy.py","file_name":"_strategy.py","file_ext":"py","file_size_in_byte":12725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"7799561353","text":"import sys\nfrom itertools import combinations\n\nsys.stdin = open('input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n n = int(input())\n synergy = [list(map(int, input().split())) for _ in range(n)]\n food = list(range(n))\n m = n // 2\n sub_set = []\n ans = 100\n food_a = list(combinations(food, m))\n for a in food_a:\n if len(a) == 2:\n 
taste_a = synergy[a[0]][a[1]] + synergy[a[1]][a[0]]\n else:\n a_sub = list(combinations(a, 2))\n taste_a = 0\n for sub in a_sub:\n taste_a += synergy[sub[0]][sub[1]] + synergy[sub[1]][sub[0]]\n b = list(set(food) - set(a))\n if len(b) == 2:\n taste_b = synergy[b[0]][b[1]] + synergy[b[1]][b[0]]\n else:\n b_sub = list(combinations(b, 2))\n taste_b = 0\n for sub in b_sub:\n taste_b += synergy[sub[0]][sub[1]] + synergy[sub[1]][sub[0]]\n gap = abs(taste_a - taste_b)\n ans = min(ans, gap)\n print(f'#{tc} {ans}')\n","repo_name":"hallov012/Algo_hallov","sub_path":"2022.03/0312/SWEA_4012_요리사/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"15278670569","text":"# Returns the first and last day of a week given as \"2020_11\"\nimport datetime\n\ndef date_to_yearweek(d:datetime):\n year, weeknumber, weekday = d.date().isocalendar()\n return str(year) + \"_\" + str(weeknumber)\n\ndef yearweek_to_dates(yw:str):\n year, week = yw.split(\"_\")\n year, week = int(year), int(week)\n\n first = datetime.datetime(year, 1, 1)\n base = 1 if first.isocalendar()[1] == 1 else 8\n monday = first + datetime.timedelta(days=base - first.isocalendar()[2] + 7 * (week - 1))\n sunday = monday + datetime.timedelta(days=6)\n thursday = monday + datetime.timedelta(days=3)\n return monday, thursday, sunday\n\ndef date_to_quarter(dt):\n return \"Q{}\".format( (dt.month+2) // 3 )","repo_name":"smangalik/county_topics_did","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"11067225231","text":"import torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass LSTM_CNN_3_layers(nn.Module):\r\n def __init__(self, seq_length, img_size,kernel_s_layer_1, kernel_s, max1, max2, max3,out_channels_1, out_channels_2, out_channels_final, LSTM_hidden, LSTM_layers, p=0.0):\r\n super(LSTM_CNN_3_layers, self).__init__()\r\n self.seq_length = seq_length\r\n self.max_1 = max1\r\n self.max_2 = max2\r\n self.max_3 = max3\r\n self.lstm_size = LSTM_hidden\r\n self.lstm_lay = LSTM_layers\r\n self.conv1 = nn.Conv2d(1, out_channels_1, kernel_size=kernel_s_layer_1)\r\n self.cnn_bn1 = nn.BatchNorm2d(out_channels_1)\r\n self.conv2 = nn.Conv2d(out_channels_1, out_channels_2, kernel_size=kernel_s)\r\n self.cnn_bn2 = nn.BatchNorm2d(out_channels_2)\r\n self.conv3 = nn.Conv2d(out_channels_2, out_channels_final, kernel_size=kernel_s)\r\n self.cnn_bn3 = nn.BatchNorm2d(out_channels_final)\r\n self.img_size = img_size\r\n\r\n size_after_cnn = int((int((int((img_size - kernel_s_layer_1 + 1) / self.max_1) - kernel_s + 1) / self.max_2) - kernel_s + 1) / self.max_3)\r\n self.out_size_cnn = out_channels_final*size_after_cnn**2\r\n self.LSTM = nn.LSTM(input_size=self.out_size_cnn , hidden_size=LSTM_hidden, num_layers=self.lstm_lay, dropout=p, batch_first=True)\r\n self.fc1 = nn.Linear(in_features=self.seq_length * self.lstm_size, out_features=2000)\r\n self.bn1 = nn.BatchNorm1d(num_features=2000)\r\n self.fc2 = nn.Linear(in_features=2000, out_features=2000)\r\n self.bn2 = nn.BatchNorm1d(num_features=2000)\r\n self.fc3 = nn.Linear(in_features= 2000, out_features= 750)\r\n self.bn3 = nn.BatchNorm1d(num_features=750)\r\n self.fc4 = nn.Linear(in_features= 750, out_features= seq_length)\r\n\r\n def forward(self, t):\r\n batch_s, timesteps,C, H, W = t.size()\r\n t = t.view(batch_s * timesteps, 1, H, 
W)\r\n t = F.max_pool2d(F.leaky_relu(self.cnn_bn1(self.conv1(t))), kernel_size=self.max_1, stride=self.max_1)\r\n t = F.max_pool2d(F.leaky_relu(self.cnn_bn2(self.conv2(t))), kernel_size=self.max_2, stride=self.max_2)\r\n t = F.max_pool2d(F.leaky_relu(self.cnn_bn3(self.conv3(t))), kernel_size=self.max_3, stride=self.max_3)\r\n t = t.reshape(batch_s, self.seq_length, self.out_size_cnn)\r\n t, (h_n, h_c) = self.LSTM(t)\r\n t = t.reshape(batch_s, self.lstm_size*self.seq_length)\r\n\r\n t = F.leaky_relu(self.bn1(self.fc1(t)))\r\n\r\n t = F.leaky_relu(self.bn2(self.fc2(t)))\r\n t = F.leaky_relu(self.bn3(self.fc3(t)))\r\n t = self.fc4(t)\r\n return t\r\n\r\n","repo_name":"AntonioGarciaGarvi/Celegans-Lifespan-Automation-Using-Deep-Learning","sub_path":"Demo_files/cnn_lstm_model.py","file_name":"cnn_lstm_model.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"8870649329","text":"from flask import Flask, flash, redirect, render_template, request, session, abort, make_response, url_for\nimport json\n\nfrom document import Database\nfrom document.Feature import Feature\nfrom forms import FeatureForm\n\napp = Flask(__name__)\napp.config.from_object('config')\napp.secret_key = \"super secret key\"\n\nMd = Database.MongoDatabase()\ndb = Md.db\n\n@app.route('/')\ndef home():\n\treturn render_template('home.html')\n\n@app.route('/managefeature')\ndef manageFeature():\n\tfeatures = db.Features.find()\n\n\treturn render_template('manageFeature.html', datas=features)\n\n@app.route('/examplesolo')\ndef exampleSolo():\n\treturn render_template('exampleSolo.html')\n\n@app.route('/examplemulti')\ndef exampleMulti():\n\treturn render_template('exampleMulti.html')\n\n@app.route('/featureIsOk', methods=['POST'])\ndef featureIsOk():\n\tfeature_id = request.form['featureId']\n\tcountry = request.form['country']\n\t\n\tfeature = Feature(feature_id)\n\tstates = feature.getStates()\n\tdata = (\"0\", \"1\")['Activate' == states[country]]\n\t\n\tresp = make_response(json.dumps(data))\n\tresp.status_code = 200\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n\n@app.route('/featuresAreOk', methods=['POST'])\ndef featuresAreOk():\n\tfeatures = request.get_json()['listFeature']\n\tdatas = {}\n\tfor feature in features:\n\t\tobj = features[feature]\n\t\tfeatureId = obj['id']\n\t\tfeature = Feature(featureId)\n\t\tstates = feature.getStates()\n\t\tcountry = obj['country']\n\t\tstate = (\"0\", \"1\")['Activate' == states[country]]\n\t\tdatas[obj['pubId']] = state\n\t\n\tresp = make_response(json.dumps(datas))\n\tresp.status_code = 200\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n\n@app.route('/addfeature', methods=['GET', 'POST'])\ndef addfeature():\n\tform = FeatureForm()\n\tif form.validate_on_submit():\n\t\tfeature = Feature()\n\t\tfeature._name = form.name.data\n\t\tfeature._state_us = form.stateUs.data\n\t\tfeature._state_en = form.stateEn.data\n\t\tfeature._state_de = form.stateDe.data\n\t\tfeature._state_cl = form.stateCl.data\n\t\tfeature._state_fi = form.stateFi.data\n\t\tfeature._state_it = form.stateIt.data\n\t\tfeature._state_jp = form.stateJp.data\n\t\tfeature._state_es = form.stateEs.data\n\t\tfeature._state_ru = form.stateRu.data\n\t\tfeature._state_fr = form.stateFr.data\n\n\t\t#insert val in new feature\n\t\tfeature.commit()\n\n\t\tflash('New feature successfuly created added.')\n\n\t\treturn redirect(url_for('managefeature'))\n\n\treturn 
render_template('addEditFeature.html', form=form, type=\"create\")\n\n\n@app.route('/editfeature/', methods=['GET', 'POST'])\ndef editfeature(feature_id):\n\tdbFeature = db.features\n\tfeature = Feature(feature_id)\n\tif feature is None:\n\t\tflash('feature doesnt exist.')\n\n\t\treturn redirect(url_for('home'))\n\n\tform = FeatureForm()\n\n\tif form.validate_on_submit():\n\t\tfeature._name = form.name.data\n\t\tfeature._state_us = form.stateUs.data\n\t\tfeature._state_en = form.stateEn.data\n\t\tfeature._state_de = form.stateDe.data\n\t\tfeature._state_cl = form.stateCl.data\n\t\tfeature._state_fi = form.stateFi.data\n\t\tfeature._state_it = form.stateIt.data\n\t\tfeature._state_jp = form.stateJp.data\n\t\tfeature._state_es = form.stateEs.data\n\t\tfeature._state_ru = form.stateRu.data\n\t\tfeature._state_fr = form.stateFr.data\n\n\t\tfeature.commit()\n\n\t\tflash('feature successfuly edited.')\n\n\t\treturn redirect(url_for('managefeature'))\n\n\t# pas reussis a faire marcher\n\t#form.populate_obj(feature)\n\tif request.method == 'GET':\n\t\tform.name.data = feature._name\n\t\tform.stateUs.data = feature._state_us\n\t\tform.stateEn.data = feature._state_en\n\t\tform.stateDe.data = feature._state_de\n\t\tform.stateCl.data = feature._state_cl\n\t\tform.stateFi.data = feature._state_fi\n\t\tform.stateIt.data = feature._state_it\n\t\tform.stateJp.data = feature._state_jp\n\t\tform.stateEs.data = feature._state_es\n\t\tform.stateRu.data = feature._state_ru\n\t\tform.stateFr.data = feature._state_fr\n\n\treturn render_template('addEditFeature.html', form=form, type=\"edit\", id=feature._id)\n\nif __name__ == \"__main__\":\n\tapp.run()","repo_name":"shedelin/TestFeatureFliping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"15593685542","text":"from .models import Car\nfrom django import forms\nfrom administrators.models import ParkingLot\nfrom django.utils.html import format_html\nfrom paypal.standard.forms import PayPalPaymentsForm\nfrom localflavor.us.forms import USStateSelect, USStateField\nfrom localflavor.us.us_states import STATE_CHOICES\n\n\nclass RegisterCarForm(forms.ModelForm):\n \"\"\"Register form to for adding a car\"\"\"\n\n # parking = forms.ModelChoiceField(queryset=ParkingLot.objects.all(), widget=forms.Select(\n # attrs={'class': 'form-control'}))\n\n state = forms.ChoiceField(choices=STATE_CHOICES, widget=forms.Select(attrs={'class': 'form-control'}))\n\n class Meta:\n model = Car\n fields = ('make', 'model', 'license_plate_number', 'state',)\n\n\nclass UpdateParkingCarForm(forms.ModelForm):\n \"\"\"Form to for updating car parking\"\"\"\n\n parking = forms.ModelChoiceField(queryset=ParkingLot.objects.all(), widget=forms.Select(\n attrs={'class': 'form-control'}))\n\n class Meta:\n model = Car\n fields = ('parking',)\n\n\nclass ParkingLotMembership(forms.Form):\n \"\"\"Form for customer to join parking lots as members\"\"\"\n parking_lot = forms.ModelChoiceField(queryset=ParkingLot.objects.all(), widget=forms.HiddenInput)\n\n\nclass CustomPayPalPaymentsForm(PayPalPaymentsForm):\n\n def render(self, *args, **kwargs):\n if not args and not kwargs:\n return format_html(\n \"\"\"
{1} \n
\"\"\",\n self.get_login_url(),\n self.as_p(),\n ''\n )\n else:\n return super().render(*args, **kwargs)\n","repo_name":"Cole-Brooks/SeniorDesignProject","sub_path":"customers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"18729994369","text":"import smtplib\nimport string\n \nHOST = \"smtp.gmail.com\"\nSUBJECT = \"Test email from Python\"\nTO = \"test@qq.com\"\nFROM = \"test@gmail.com\"\ntext = \"Python rules them all!\"\nBODY = string.join((\n \"From: %s\" % FROM,\n \"To: %s\" % TO,\n \"Subject: %s\" % SUBJECT ,\n \"\",\n text\n ), \"\\r\\n\")\nserver = smtplib.SMTP()\nserver.connect(HOST,\"25\")\nserver.starttls()\nserver.login(\"test@gmail.com\",\"123456\")\nserver.sendmail(FROM, [TO], BODY)\nserver.quit()\n","repo_name":"anzhihe/learning","sub_path":"python/source_code/Python_Automated_Operations/第二章/smtplib/simple1.py","file_name":"simple1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1535,"dataset":"github-code","pt":"23"} +{"seq_id":"35813474034","text":"# 수 정렬하기 3 (브1)\n# https://www.acmicpc.net/problem/10989\n\n# 정렬 문제\n# 오름차순 정렬 결과 출력\n# 주어지는 수는 10,000보다 작거나 같은 자연수\n\n# ===== 문제 풀이 2 - 계수 정렬 사용 - 맞춤 =====\nimport sys\n\nn = int(sys.stdin.readline())\ncount = [0] * (10000+1)\n\n# 리스트에 담는 만큼 메모리를 잡아먹는다.\n# 굳이 리스트에 값을 담을 필요는 없다\n\n# array = []\n# for _ in range(n):\n# array.append(int(sys.stdin.readline()))\n\nfor _ in range(n):\n num = int(sys.stdin.readline())\n count[num] += 1\n# print(array)\n\nfor i in range(len(count)):\n for j in range(count[i]):\n print(i)\n\n# ==== 문제 풀이1 - 메모리 초과 발생 ====\n\n# import sys\n# input = sys.stdin.readline\n\n# n = int(input())\n\n# num_list = []\n# for _ in range(n):\n# num = int(input())\n# num_list.append(num)\n\n# num_list.sort()\n# print(*num_list, sep='\\n')\n\n","repo_name":"bbugi/python_algorithm","sub_path":"backjoon/완료/10989(브1).py","file_name":"10989(브1).py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"5331596469","text":"class Node:\n def __init__(self, val):\n self.left:Node = None \n self.right:Node = None \n self.value:Node = val \n self.depth:int = 0\n\nclass Tree:\n def __init__(self, root:Node):\n self.root:Node = root\n\n '''\n This function returns the max depth of the tree with root \n provided for the constructor using iterative depth first traversal.\n DFS type: post order\n '''\n def max_depth(self):\n\n if self.root is None:\n #if there are no nodes, the depth is -1\n return -1\n \n max_depth = -1\n self.root.depth = 0\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.depth > max_depth:\n max_depth = node.depth\n\n if node.left is not None:\n node.left.depth = node.depth + 1\n stack.append(node.left)\n\n if node.right is not None:\n node.right.depth = node.depth + 1\n stack.append(node.right)\n\n return max_depth\n \n''' \n 1\n 2 3\n4 5\n 6\nmax depth = 2\nTest by changing the tree\n'''\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\n\nn1.left = n2\nn1.right = n3 \nn2.left = n4 \nn2.right = n5 \nn5.left = n6\n\nt = 
Tree(n1)\nprint(t.max_depth())","repo_name":"drajamanthri/data-structures","sub_path":"binary-tree/dfs/max-depth/solution_iterative.py","file_name":"solution_iterative.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"22469500348","text":"from typing import Dict, Union, List\n\nfrom qdexplus.dex_insns_item import DexInsnsItem\nfrom qdexplus.utils import convertUleb128BytesToInt, convertIntToUleb128Bytes, convertSleb128BytesToInt, \\\n convertIntToSleb128Bytes\nfrom qstruct.base import QType, QStructField\nfrom qstruct.contrib import android_dexfile\nfrom qstruct.contrib.android_dexfile import QDexStruct, DexTypeId, DexMethodId, DexFieldId, DexClassDef, DexTypeItem, \\\n DexMapItem, kDexTypeCodeItem\nfrom qstruct.primary import QUInt8, QTChar, QUInt32\nfrom qstruct.qarray import QArray32\nfrom qstruct.qpointer import QPointer32\n\nQDexStruct.q_def_pointer_cls = QPointer32\nQDexStruct.array_wrapper = QArray32\n\n\nclass DexDynamicArray(object):\n q_size_attr = ['size']\n q_list_attr = ['list']\n q_objsize: int\n\n @classmethod\n def class_on_create(cls):\n if type(cls.q_size_attr) != list:\n size_attr = [cls.q_size_attr]\n cls.q_size_attr = size_attr\n if type(cls.q_list_attr) != list:\n list_attr = [cls.q_list_attr]\n cls.q_list_attr = list_attr\n size_attr = cls.q_size_attr\n list_attr = cls.q_list_attr\n num = len(size_attr)\n assert len(list_attr) == num\n for i in range(num):\n extra_on_field_change = 'on_change_' + size_attr[i]\n setattr(cls, extra_on_field_change, cls.dex_list_size_change)\n\n def dex_list_size_change(self, field: QStructField, attr_name, list_size, osize):\n idx = self.q_size_attr.index(attr_name)\n list_attr = self.q_list_attr[idx]\n vector = getattr(self, list_attr)\n # vector.set_dynamic(True)\n vector.set_length(list_size)\n # vector.set_dynamic(False)\n\n\nclass DexMapList(DexDynamicArray, android_dexfile.DexMapList):\n pass\n\n\nclass DexTypeList(DexDynamicArray, android_dexfile.DexTypeList):\n pass\n\n\nclass DexAnnotationSetItem(DexDynamicArray, android_dexfile.DexAnnotationSetItem):\n q_list_attr = 'entries'\n\n\nclass DexStringData(DexDynamicArray, QDexStruct):\n q_list_attr = 'str'\n size = QUInt8\n str = (QTChar[0]).extend(fmt=lambda _, x: (b''.join(x)).decode())\n\n\nclass DexStringId(QDexStruct):\n stringDataOff = DexStringData * 1 # file offset to string_data_item\n\n\nclass DexProtoId(QDexStruct):\n q_desc = 'Direct-mapped \"proto_id_item\".'\n shortyIdx = QUInt32 # index into stringIds for shorty descriptor\n returnTypeIdx = QUInt32 # index into typeIds list for return type\n parametersOff = DexTypeList * 1 # file offset to type_list for parameter types\n\n\nclass DexHeader(android_dexfile.DexHeader):\n mapOff = (DexMapList * 1)\n stringIdsOff = DexStringId[0] * 1\n typeIdsOff = DexTypeId[0] * 1\n protoIdsOff = DexProtoId[0] * 1\n methodIdsOff = DexMethodId[0] * 1\n fieldIdsOff = DexFieldId[0] * 1\n classDefsOff = DexClassDef[0] * 1\n\n off_watch = ['stringIdsOff', 'typeIdsOff', 'protoIdsOff', 'methodIdsOff', 'fieldIdsOff', 'classDefsOff']\n size_watch = [i[:-3] + 'Size' for i in off_watch]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.stringIds = []\n self.typeIds = []\n self.protoIds = []\n self.methodIds = []\n self.fieldIds = []\n self.classDefs = []\n\n def on_field_change(self, field: QStructField, name, nval, oval):\n if name in self.off_watch:\n if nval <= 0:\n return\n idx = 
self.off_watch.index(name)\n size = getattr(self, self.size_watch[idx])\n vector = getattr(self, name)[0] # equal getattr(self, name)*1,take out the pointer value\n vector.set_length(size)\n vector.fetch_value()\n elif name in self.size_watch:\n idx = self.size_watch.index(name)\n vector = getattr(self, self.off_watch[idx])\n if vector > 0:\n vector[0].set_length(nval)\n\n def finish_load(self):\n if self.stringIdsOff > 0:\n stringIds = self.stringIds\n stringIds.clear()\n stringIdsOff: List[DexStringId] = self.stringIdsOff * 1\n for dex_string_id in stringIdsOff:\n dex_string: DexStringData = dex_string_id.stringDataOff * 1\n stringIds.append(str(dex_string.str))\n else:\n return\n if self.typeIdsOff > 0:\n typeIds = self.typeIds\n typeIds.clear()\n dex_typeids: List[DexTypeId] = self.typeIdsOff * 1\n for typeid in dex_typeids:\n tsid = typeid.descriptorIdx.value()\n data = stringIds[tsid]\n typeIds.append(data)\n else:\n return\n if self.protoIdsOff <= 0:\n return\n protoIds = self.protoIds\n protoIds.clear()\n dex_proto_ids: List[DexProtoId] = self.protoIdsOff * 1\n for item in dex_proto_ids:\n sidx = item.shortyIdx.value()\n name = stringIds[sidx]\n ret = typeIds[item.returnTypeIdx.value()]\n type_list: DexTypeList = item.parametersOff[0]\n params = []\n if type_list is not None and type_list.size > 0:\n tlist: List[DexTypeItem] = type_list.list\n for type_item in tlist:\n ps = typeIds[type_item.typeIdx.value()]\n params.append(ps)\n protoIds.append(\"{} {}({})\".format(ret, name, ''.join(params)))\n if self.methodIdsOff <= 0:\n return\n\n methodIds = self.methodIds\n methodIds.clear()\n dex_method_ids: List[DexMethodId] = self.methodIdsOff * 1\n for dex_method_id in dex_method_ids:\n class_name = typeIds[dex_method_id.classIdx.value()]\n proto = protoIds[dex_method_id.protoIdx.value()]\n name = stringIds[dex_method_id.nameIdx.value()]\n methodIds.append(\"{} --> {} {}\".format(class_name, proto, name))\n print('\\n'.join(methodIds))\n if self.fieldIdsOff <= 0:\n return\n dex_field_ids: List[DexFieldId] = self.fieldIdsOff * 1\n fieldIds = self.fieldIds\n fieldIds.clear()\n for dex_field_id in dex_field_ids:\n class_name = typeIds[dex_field_id.classIdx.value()]\n type_name = typeIds[dex_field_id.typeIdx.value()]\n name = stringIds[dex_field_id.nameIdx.value()]\n fieldIds.append('{} --> {} {}'.format(class_name, type_name, name))\n\n if self.classDefsOff <= 0:\n return\n classDefs = self.classDefs\n classDefs.clear()\n dex_class_defs: List[DexClassDef] = self.classDefsOff * 1\n for dex_class_def in dex_class_defs:\n class_idx = dex_class_def.classIdx.value()\n cls_name = typeIds[class_idx]\n access_flags = dex_class_def.accessFlags.value()\n super_class = typeIds[dex_class_def.superclassIdx.value()]\n source_file = stringIds[dex_class_def.sourceFileIdx.value()]\n # if dex_class_def.interfacesOff.value():\n # dex_type_list = DexTypeList(dex_class_def.interfacesOff)\n # if dex_class_def.annotationsOff.value():\n # annotations_dir = DexAnnotationsDirectoryItem(dex_class_def.annotationsOff)\n # if dex_class_def.classDataOff.value():\n # dex_class_data: DexClassData = dex_class_def.classDataOff[0]\n # if dex_class_data.directMethods.get_length() > 0:\n # pass\n cls_des = \"{}:{} ---> {} {} extends {} {}\".format(class_idx, source_file, access_flags, cls_name,\n super_class, '')\n print(cls_des)\n classDefs.append(cls_des)\n if self.mapOff <= 0:\n return\n dex_map_list = self.mapOff * 1\n map_list: List[DexMapItem] = dex_map_list.list\n for dex_map_item in map_list:\n item_type = 
dex_map_item.type.value()\n if item_type == kDexTypeCodeItem:\n ArrayType = DexCode[dex_map_item.size]\n items = ArrayType(dex_map_item.offset)\n self.parse_code_items(items)\n\n def parse_code_items(self, dex_code_items: List['DexCode']):\n for dex_code_item in dex_code_items:\n insns: List[DexInsnsItem] = dex_code_item.insns\n for insns_item in insns:\n print(insns_item.desc)\n kind = insns_item.kind\n kind_x = insns_item.kind_x\n proto_x = insns_item.proto_x\n if kind is None and proto_x is None:\n continue\n if proto_x is not None:\n proto = self.protoIds[proto_x]\n else:\n proto = None\n kind_desc = None\n if kind:\n if 'string'.__eq__(kind):\n kind_desc = self.stringIds[kind_x]\n elif 'type'.__eq__(kind):\n kind_desc = self.typeIds[kind_x]\n elif 'field'.__eq__(kind):\n kind_desc = self.fieldIds[kind_x]\n elif 'meth'.__eq__(kind):\n kind_desc = self.methodIds[kind_x]\n elif 'site'.__eq__(kind):\n raise NotImplemented\n elif 'proto'.__eq__(kind):\n proto = self.protoIds[kind_x]\n elif 'vtaboff'.__eq__(kind):\n raise NotImplemented\n elif 'fieldoff'.__eq__(kind):\n raise NotImplemented\n fmt = insns_item.format(kind_desc, proto)\n print(fmt)\n\n\n# class DexClassData(QDexStruct):\nassert DexStringId.q_objsize == 4\nassert DexProtoId.q_objsize == 12\nassert DexHeader.q_objsize == android_dexfile.DexHeader.q_objsize\n\n\nclass Qleb128(QType):\n q_dynamic = True\n q_def_value = 0\n q_bs = b'\\x00'\n q_objsize = 1\n q_value = None\n decode = None\n encode = None\n\n def fetch_dy_value(self):\n # addr = self.address()\n bs = self.read_self(5)\n v, size = self.decode(bs)\n self.set_value(v)\n assert self.q_objsize == size\n return True\n\n def set_value(self, val):\n self.q_bs, q_objsize = self.encode(val)\n super().set_value(val)\n self.set_objsize(q_objsize)\n\n def serial(self) -> Union[Dict[int, bytes], bytes]:\n return self.q_bs\n\n def unserial(self, datas, offset: int = 0):\n self.set_value(self.decode(datas[offset:offset + 5])[0])\n\n\nclass QUleb128(Qleb128):\n decode = staticmethod(convertUleb128BytesToInt)\n encode = staticmethod(convertIntToUleb128Bytes)\n\n\nclass QSleb128(Qleb128):\n decode = staticmethod(convertSleb128BytesToInt)\n encode = staticmethod(convertIntToSleb128Bytes)\n\n\n# -------------DexClass-------------------\n\nclass DexClassDataHeader(QDexStruct):\n staticFieldsSize = QUleb128\n instanceFieldsSize = QUleb128\n directMethodsSize = QUleb128\n virtualMethodsSize = QUleb128\n\n\nassert DexClassDataHeader.q_objsize == 4\n\n\nclass DexField(QDexStruct):\n q_desc = \"\"\"\n expanded form of encoded_field\n \"\"\"\n fieldIdx = QUleb128 # index to a field_id_item\n accessFlags = QUleb128\n\n\nclass DexMethod(QDexStruct):\n \"\"\"\n expanded form of encoded_method\n \"\"\"\n\n methodIdx = QUleb128 # index to a method_id_item\n accessFlags = QUleb128\n codeOff = QUleb128 # file offset to a code_item\n\n\nclass DexClassData(QDexStruct):\n q_monitor_change = True\n \"\"\"\n expanded form of class_data_item. 
Note: If a particular item is\n * absent (e.g., no static fields), then the corresponding pointer\n * is set to NULL.\n \"\"\"\n\n header = DexClassDataHeader\n staticFields = DexField[0]\n instanceFields = DexField[0]\n directMethods = DexMethod[0]\n virtualMethods = DexMethod[0]\n\n # def on_leaf_value_change(self, child, leaf, val, cids):\n # print(cids, val)\n\n def on_leaf_change_header_staticFieldsSize(self, child, leaf, val):\n self.staticFields.set_length(val)\n\n def on_leaf_change_header_instanceFieldsSize(self, child, leaf, val):\n self.instanceFields.set_length(val)\n\n def on_leaf_change_header_directMethodsSize(self, child, leaf, val):\n self.directMethods.set_length(val)\n\n def on_leaf_change_header_virtualMethodsSize(self, child, leaf, val):\n self.virtualMethods.set_length(val)\n\n\nclass DexTypeAddrPairData(QDexStruct):\n type_idx = QUleb128\n addr = QUleb128\n\n\nclass DexCatchHandlerData(QDexStruct):\n size = QSleb128\n handlers = DexTypeAddrPairData[0]\n catch_all_addr = QUleb128[0]\n\n\nclass DexCatchHandlerListData(DexDynamicArray, QDexStruct):\n size = QUleb128\n list = DexCatchHandlerData[0]\n\n\nclass DexCode(DexDynamicArray, android_dexfile.DexCode):\n q_list_attr = [\"try_item\"]\n q_size_attr = [\"triesSize\"]\n try_item = android_dexfile.DexTry[0]\n handlers = DexCatchHandlerListData[0]\n\n insns = (DexInsnsItem[0])\n\n def on_leaf_value_change(self, child, leaf, val, cids: List[str]):\n print(cids, val)\n\n def on_field_change(self, field: QStructField, name, nval, oval):\n if name == 'triesSize':\n if self.triesSize > 0:\n self.handlers.set_length(1)\n elif name == 'insnsSize':\n if self.insnsSize > 0:\n self.insns.size_fix(self.insnsSize.value() * 2)\n else:\n assert self.insnsSize == 0\n self.insns.set_length(0)\n\n\nclass DexClassDef(android_dexfile.DexClassDef):\n classDataOff = DexClassData * 1\n","repo_name":"WanQingGit/qdexplus","sub_path":"dex_struct.py","file_name":"dex_struct.py","file_ext":"py","file_size_in_byte":12263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43640464782","text":"exp = input('Digite uma expresão: ')\nlista1 = []\nlista2 = []\nfor c in exp:\n if '(' in c:\n lista1.append(c)\n elif ')' in c:\n lista2.append(c)\nif len(lista1) == len(lista2):\n print('Expressão válida')\nelse:\n print('Expressão ínválida')\nprint('---'*20)\nprint('FIM DO PROGRAMA !!!')\nprint('---'*20)","repo_name":"Fiirmino/Curso-Python-Mundo-2","sub_path":"Listas/Ex05 - Validando expressões matemáticas.py","file_name":"Ex05 - Validando expressões matemáticas.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18253092131","text":"from django.shortcuts import render, redirect\nfrom Branches.models import Branch\nfrom .models import Deposit, Withdraw, Treasur\nfrom django.contrib import messages\n\ndef treasury(request, branch_id):\n branch = Branch.objects.get(pk=branch_id)\n treasur = branch.treasur_set.first()\n \n if request.method == 'POST':\n process = request.POST['process']\n amount = request.POST['amount']\n notes = request.POST['notes']\n\n if process == 'deposit':\n deposit = Deposit(amount=amount, notes=notes, treasur=treasur)\n deposit.save()\n treasur.total_cash += int(amount)\n branch.capital_equity += int(amount)\n messages.add_message(request, messages.INFO, 'Added Deposit successfully. 
' + str(deposit))\n\n elif process == 'withdraw':\n withdraw = Withdraw(amount=amount, notes=notes, treasur=treasur)\n withdraw.save()\n treasur.total_cash -= int(amount)\n branch.capital_equity -= int(amount)\n messages.add_message(request, messages.INFO, 'Added Withdraw successfully. ' + str(withdraw))\n\n return redirect('/branches/' + str(branch.pk) + '/treasury/')\n\n template = \"Treasury/treasury.html\"\n context = {\n 'branch': branch,\n 'active': 'treasury'\n }\n return render(request, template, context=context)","repo_name":"mostafa-yasen/Car-Fair-Management","sub_path":"Treasury/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23172771346","text":"from classes.field import PropertyField, Street\nfrom classes.field import HousesNumError\nfrom classes.player import Player\nimport pytest\n\n\nclass TestField:\n field = PropertyField(0, 'start', 'blue', 50, {\n \"base_price\": 100}, {\"mortgage\": 50})\n other_field = PropertyField(\n 20, 'train station', 'green', 50, {\"base_price\": 100}, {\"mortgage\": 50})\n\n def test_set_rent(self):\n assert self.field.base_rent() == 50\n assert self.field.current_rent() == 50\n self.field.set_current_rent(100)\n assert self.field.current_rent() == 100\n\n def test_set_rent_exception(self):\n with pytest.raises(ValueError):\n self.field.set_current_rent(-10)\n\n def test_set_owner(self):\n player = Player()\n assert self.field.owner() is None\n self.field.set_owner(player)\n assert self.field.owner() == player\n\n def test_mortgage(self):\n self.field.do_mortgage()\n assert self.field.is_mortgaged()\n assert self.field.current_rent() == 0\n\n def test_lift_mortgage(self):\n self.field.lift_mortgage()\n assert not self.field.is_mortgaged()\n assert self.field.current_rent() == self.field.base_rent()\n\n def test_return_to_bank(self):\n player = Player()\n self.field.set_owner(player)\n self.field.do_mortgage()\n self.field.return_to_bank()\n assert self.field.owner() is None\n assert not self.field.is_mortgaged()\n\n def test_total_value(self):\n assert self.field.total_value() == self.field.price() / 2\n self.field.do_mortgage()\n assert self.field.total_value() == 0\n\n\nclass TestStreetField:\n field_id = 1\n name = 'west avenue'\n colour = 'green'\n rent = 100\n other_rents = {\n \"w_one_house\": 30,\n \"w_two_houses\": 90,\n \"w_three_houses\": 180,\n \"w_four_houses\": 320,\n \"w_hotel\": 550,\n \"mortgage\": 30\n }\n prices = {\n \"base_price\": 100,\n \"house_cost\": 50,\n \"hotel_cost\": 50\n }\n\n def test_add_house(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n assert street.houses_num() == 0\n street.add_house()\n assert street.houses_num() == 1\n street.add_house()\n assert street.houses_num() == 2\n\n def test_add_fifth_house(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n street.add_house()\n street.add_house()\n street.add_house()\n street.add_house()\n with pytest.raises(HousesNumError):\n street.add_house()\n\n def test_add_hotel(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n street.add_house()\n street.add_house()\n street.add_house()\n street.add_house()\n street.add_hotel()\n assert street.hotel() is True\n\n def test_second_hotel_error(self):\n street = Street(self.field_id, self.name, self.colour,\n 
self.rent, self.prices, self.other_rents)\n street.add_house()\n street.add_house()\n street.add_house()\n street.add_house()\n street.add_hotel()\n with pytest.raises(HousesNumError):\n street.add_hotel()\n\n def test_too_many_houses(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n street.add_house()\n street.add_house()\n with pytest.raises(HousesNumError):\n street.add_hotel()\n\n def test_update_rent(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n assert street.current_rent() == self.rent\n street.add_house()\n assert street.current_rent() == self.other_rents['w_one_house']\n street.add_house()\n assert street.current_rent() == self.other_rents['w_two_houses']\n street.add_house()\n assert street.current_rent() == self.other_rents['w_three_houses']\n street.add_house()\n assert street.current_rent() == self.other_rents['w_four_houses']\n street.add_hotel()\n assert street.current_rent() == self.other_rents['w_hotel']\n\n def test_house_cost_hotel_cost(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n assert street.house_cost() == self.prices['house_cost']\n assert street.hotel_cost() == self.prices['hotel_cost']\n\n def test_return_to_bank(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n player = Player()\n street.set_owner(player)\n street.add_house()\n street.return_to_bank()\n assert street.owner() is None\n assert street.houses_num() == 0\n\n def test_total_value(self):\n street = Street(self.field_id, self.name, self.colour,\n self.rent, self.prices, self.other_rents)\n street.add_house()\n assert street.total_value() == street.price() / 2 + street.house_cost()\n street.add_house()\n assert street.total_value() == street.price() / 2 \\\n + street.house_cost() * 2\n","repo_name":"jmacuga/Monopoly-game","sub_path":"tests/test_field.py","file_name":"test_field.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74849529325","text":"from reaper_python import *\nimport os\nimport platform\nimport unicodedata\n\n# from AppKit import *\n\nimport platform, os\n\ndef winGetClipboard():\n ctypes.windll.user32.OpenClipboard(0)\n pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT\n data = ctypes.c_char_p(pcontents).value\n #ctypes.windll.kernel32.GlobalUnlock(pcontents)\n ctypes.windll.user32.CloseClipboard()\n return data\n\ndef winSetClipboard(text):\n text = str(text)\n GMEM_DDESHARE = 0x2000\n ctypes.windll.user32.OpenClipboard(0)\n ctypes.windll.user32.EmptyClipboard()\n try:\n # works on Python 2 (bytes() only takes one argument)\n hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)\n except TypeError:\n # works on Python 3 (bytes() requires an encoding)\n hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)\n pchData = ctypes.windll.kernel32.GlobalLock(hCd)\n try:\n # works on Python 2 (bytes() only takes one argument)\n ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))\n except TypeError:\n # works on Python 3 (bytes() requires an encoding)\n ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))\n ctypes.windll.kernel32.GlobalUnlock(hCd)\n ctypes.windll.user32.SetClipboardData(1, hCd)\n ctypes.windll.user32.CloseClipboard()\n\ndef 
macSetClipboard(text):\n text = str(text)\n outf = os.popen('pbcopy', 'w')\n outf.write(text)\n outf.close()\n\ndef macGetClipboard():\n outf = os.popen('pbpaste', 'r')\n content = outf.read()\n outf.close()\n return content\n\ndef gtkGetClipboard():\n return gtk.Clipboard().wait_for_text()\n\ndef gtkSetClipboard(text):\n global cb\n text = str(text)\n cb = gtk.Clipboard()\n cb.set_text(text)\n cb.store()\n\ndef qtGetClipboard():\n return str(cb.text())\n\ndef qtSetClipboard(text):\n text = str(text)\n cb.setText(text)\n\ndef xclipSetClipboard(text):\n text = str(text)\n outf = os.popen('xclip -selection c', 'w')\n outf.write(text)\n outf.close()\n\ndef xclipGetClipboard():\n outf = os.popen('xclip -selection c -o', 'r')\n content = outf.read()\n outf.close()\n return content\n\ndef xselSetClipboard(text):\n text = str(text)\n outf = os.popen('xsel -i', 'w')\n outf.write(text)\n outf.close()\n\ndef xselGetClipboard():\n outf = os.popen('xsel -o', 'r')\n content = outf.read()\n outf.close()\n return content\n\n\nif os.name == 'nt' or platform.system() == 'Windows':\n import ctypes\n getcb = winGetClipboard\n setcb = winSetClipboard\nelif os.name == 'mac' or platform.system() == 'Darwin':\n getcb = macGetClipboard\n setcb = macSetClipboard\nelif os.name == 'posix' or platform.system() == 'Linux':\n xclipExists = os.system('which xclip') == 0\n if xclipExists:\n getcb = xclipGetClipboard\n setcb = xclipSetClipboard\n else:\n xselExists = os.system('which xsel') == 0\n if xselExists:\n getcb = xselGetClipboard\n setcb = xselSetClipboard\n try:\n import gtk\n getcb = gtkGetClipboard\n setcb = gtkSetClipboard\n except Exception:\n try:\n import PyQt4.QtCore\n import PyQt4.QtGui\n app = PyQt4.QApplication([])\n cb = PyQt4.QtGui.QApplication.clipboard()\n getcb = qtGetClipboard\n setcb = qtSetClipboard\n except:\n raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')\n\n\ndef msg(m):\n RPR_ShowConsoleMsg(str(m) + \"\\n\") # Ausgabe\n\ntext = getcb()\n\n# text = unicode(text)\n# text = unicodedata.normalize('NFD', unicode(utext)).encode('ascii','ignore')\n# utext = text.decode(\"utf-8\")\n#text = unicodedata.normalize('NFKD', unicode(utext) )\n# text = unicodedata.normalize('NFKD', unicode(utext).encode('ascii','ignore')\n# utext = xutext.decode(\"utf-8\")\n# text = xutext.encode(\"ascii\",\"ignore\")\n\nif (RPR_GetPlayState() == 0) or (RPR_GetPlayState() == 2): # 0 = Stop, 2 = Pause\n\tcurrent_position = RPR_GetCursorPosition() # Position of edit-cursor\nelse:\n\tcurrent_position = RPR_GetPlayPosition() # Position of play-cursor\n\nRPR_AddProjectMarker2(0, False, current_position, 0, text, 0, 0xFF0000|0x1000000) # set red edit-marker\n","repo_name":"sreimers/Installer","sub_path":"Modules/Plugin/Scripts/Payload/ultraschall_set_namedmarker.py","file_name":"ultraschall_set_namedmarker.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14751669775","text":"# Import required libraries\nimport streamlit as st\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom models import Job, Member, CaregivingTypeEnum, Base\nimport pandas as pd\nimport datetime\nimport time\n\n# Database Setup\nengine = create_engine('sqlite:///dbms.db')\nBase.metadata.create_all(engine)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# Streamlit UI Setup\nst.title('Job Management System')\n\n# Utility Functions\ndef create_job(session, 
**job_data):\n job = Job(**job_data)\n session.add(job)\n try:\n session.commit()\n return True, \"Job Created Successfully\"\n except Exception as e:\n session.rollback()\n return False, str(e)\n\ndef get_job_by(session, **kwargs):\n filters = {k: v for k, v in kwargs.items() if v}\n return session.query(Job).filter_by(**filters).all()\n\ndef display_jobs(jobs):\n job_df = pd.DataFrame([{c.name: getattr(j, c.name) for c in Job.__table__.columns} for j in jobs])\n st.dataframe(job_df, hide_index=True)\n\n# Job Creation Form\nwith st.form('job_form'):\n st.write('Create a New Job')\n # Job input fields\n job_data = {\n 'member_user_id': st.selectbox('Member ID', [x[0] for x in session.query(Member.member_user_id).all()]),\n 'required_caregiving_type': st.selectbox('Required Caregiving Type', [t.name for t in CaregivingTypeEnum]),\n 'other_requirements': st.text_area('Other Requirements'),\n 'date_posted': st.date_input('Date Posted', datetime.date.today())\n }\n\n submitted = st.form_submit_button('Create Job')\n if submitted:\n success, message = create_job(session, **job_data)\n if success:\n st.success(message)\n else:\n st.error(message)\n\n# Job Search Form\nwith st.form('job_search'):\n st.write('Search Jobs')\n search_params = {\n 'job_id': st.text_input('By Job ID'),\n 'member_user_id': st.text_input('By Member ID'),\n 'required_caregiving_type': st.selectbox('By Caregiving Type', [''] + [t.name for t in CaregivingTypeEnum])\n }\n\n searched = st.form_submit_button('Search')\n if searched:\n jobs = get_job_by(session, **search_params)\n display_jobs(jobs)\n\n# Update Job Section\nst.subheader('Update or Delete Job')\nall_jobs = session.query(Job).all()\nselected_job = st.selectbox('Select a job to update or delete', options=all_jobs, format_func=lambda x: f\"Job ID: {x.job_id}, Caregiving Type: {x.required_caregiving_type}\")\n\nif selected_job:\n if st.button('Edit This Job'):\n st.session_state['job_id_to_edit'] = selected_job.job_id\n\n # Job Update Form\n if 'job_id_to_edit' in st.session_state:\n with st.form(\"update_job_form\"):\n st.write('Edit Job Details')\n update_data = {\n 'required_caregiving_type': st.selectbox('New Caregiving Type', [t.name for t in CaregivingTypeEnum], index=[t.name for t in CaregivingTypeEnum].index(selected_job.required_caregiving_type)),\n 'other_requirements': st.text_area('New Other Requirements', value=selected_job.other_requirements),\n 'date_posted': st.date_input('New Date Posted', selected_job.date_posted)\n }\n save_changes = st.form_submit_button('Save Changes')\n if save_changes:\n job_to_update = session.query(Job).filter_by(job_id=st.session_state['job_id_to_edit']).first()\n for key, value in update_data.items():\n setattr(job_to_update, key, value)\n session.commit()\n st.success('Job Updated')\n del st.session_state['job_id_to_edit']\n time.sleep(1)\n st.rerun()\n\n # Job Deletion\n if st.button('Delete This Job'):\n job_to_delete = session.query(Job).filter_by(job_id=selected_job.job_id).first()\n session.delete(job_to_delete)\n session.commit()\n st.success('Job Deleted')\n time.sleep(1)\n st.rerun()\nelse:\n st.write(\"No job selected or job does not exist.\")\n","repo_name":"AlimTleuliyev/dbms-ass2","sub_path":"pages/5_🗂️_Job.py","file_name":"5_🗂️_Job.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43190121295","text":"'''\nWrite a script that takes in a number from the user as input and prints the following 
structure.\n\nSuppose the input is 5, you will output\n*\n* *\n* * * \n* * * *\n* * * * * \ni.e. number of rows will be 5, 1st row will have 1 star, 2nd row will have 2 stars, 3rd row 3 stars, 4th row will have 4 stars and 5th row will have 5 stars.\n\nAnother example: if input is 3, you will output\n*\n* *\n* * *\n\nHint: Think of nested for loops\n\n'''\nusr_inp = int(input(\"Please chose a number: \"))\n\nfor row in range(1, usr_inp + 1):\n for column in range(row):\n print(\"*\", end=\" \")\n print()\n","repo_name":"Tyserie/coding_nomads_python_labs","sub_path":"04_conditionals_loops/04_08_star_pyramid.py","file_name":"04_08_star_pyramid.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38488111584","text":"'''Day 16 of Advent of Code 2020\n\nThe prompt is to examine tickets in an incomprehensible language.\n'''\n\nfrom argparse import ArgumentParser\nfrom pathlib import PurePath\nfrom time import process_time\nimport re\n\ndef main(file_path):\n '''Read input from file, produce solutions, and print them.\n '''\n with open(file_path, 'r') as f:\n rules, yours, nearby = parse_input(f.readlines())\n\n\n t0 = process_time()\n solution = part1(rules, nearby)\n t1 = process_time()\n print(\"Day 16 Part 1 solution: {}\".format(solution))\n print('Run time: {:0.2f} milliseconds'.format((t1-t0)*1000))\n\n t0 = process_time()\n solution = part2(rules, yours, nearby)\n t1 = process_time()\n print(\"Day 16 Part 2 solution: {}\".format(solution))\n print('Run time: {:0.2f} milliseconds'.format((t1-t0)*1000))\n\n\ndef parse_input(lines):\n rules = {}\n yours = None\n nearby = []\n\n section = 0\n for line in lines:\n if line == '\\n':\n section += 1\n\n elif section == 0:\n tokens = re.findall('^[\\w ]+|\\d+', line)\n rule_name = tokens[0]\n num_ranges = []\n for i in range(1, len(tokens)-1, 2):\n num_ranges.append(list(range(int(tokens[i]), int(tokens[i+1])+1)))\n\n rules[rule_name] = set().union(*num_ranges)\n\n elif section == 1:\n yours = [int(n) for n in re.findall('\\d+', line)]\n\n elif section == 2:\n numbers = [int(n) for n in re.findall('\\d+', line)]\n if numbers:\n nearby.append(numbers)\n\n return rules, yours, nearby\n\n\ndef part1(rules, nearby):\n all_rule_numbers = get_all_rule_numbers(rules)\n\n rate = 0\n for n in nearby:\n rate += sum(list(set(n) - all_rule_numbers))\n\n return rate\n\n\ndef part2(rules, yours, nearby):\n all_rule_numbers = get_all_rule_numbers(rules)\n\n valid_nearby = [s for s in nearby if is_ticket_valid(all_rule_numbers, s)]\n valid = valid_nearby + [yours]\n\n field_sets = get_field_sets(valid)\n num_fields = len(field_sets)\n\n rule_indices = {r: get_possible_indices(v, field_sets) for r, v in rules.items()}\n\n while any([len(v) > 1 for v in rule_indices.values()]):\n process_of_elimination(rule_indices)\n\n acc = 1\n for r, v in rule_indices.items():\n if re.match('^departure', r):\n i = v.pop()\n acc *= yours[i]\n\n return acc\n\n\ndef get_field_sets(nearby):\n field_sets = [set() for _ in range(len(nearby[0]))]\n\n for n in nearby:\n for i, v in enumerate(n):\n field_sets[i].add(v)\n\n return field_sets\n \n\ndef get_all_rule_numbers(rules):\n return set().union(*list(rules.values()))\n\n\ndef is_ticket_valid(rule_numbers, ticket):\n return set(ticket).issubset(rule_numbers)\n\n\ndef get_possible_indices(rule_values, field_sets):\n possible_indices = set()\n\n for i, fs in enumerate(field_sets):\n if fs.issubset(rule_values):\n 
possible_indices.add(i)\n\n return possible_indices\n\n\ndef process_of_elimination(set_map):\n to_eliminate = set()\n to_review = []\n\n for k, s in set_map.items():\n if len(s) == 1:\n to_eliminate = to_eliminate.union(s)\n else:\n to_review.append(s)\n\n for s in to_review:\n s -= to_eliminate\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description=main.__doc__)\n parser.add_argument('-f', type=PurePath, help='the input file')\n args = parser.parse_args()\n\n main(args.f)\n","repo_name":"bneb/advent-of-code-2020","sub_path":"day16/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"24562021938","text":"from django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\nfrom tower import ugettext_lazy as _\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\n\n\nREMOVE_BUG_URL = \"https://bugzilla.mozilla.org/enter_bug.cgi?assigned_to=nobody%40mozilla.org&bug_file_loc=http%3A%2F%2F&bug_ignored=0&bug_severity=normal&bug_status=NEW&cf_fx_iteration=---&cf_fx_points=---&comment=Please%20delete%20my%20MDN%20account.%20My%20username%20is%3A%0D%0A%0D%0A[username]&component=User%20management&contenttypemethod=autodetect&contenttypeselection=text%2Fplain&defined_groups=1&flag_type-4=X&flag_type-607=X&flag_type-791=X&flag_type-800=X&flag_type-803=X&form_name=enter_bug&maketemplate=Remember%20values%20as%20bookmarkable%20template&op_sys=All&priority=--&product=Mozilla%20Developer%20Network&rep_platform=All&short_desc=Account%20deletion%20request%20for%20[username]&status_whiteboard=[account-mod]&target_milestone=---&version=unspecified&format=__standard__\"\nREMOVE_MESSAGE = _(u\"Sorry, you must have at least one connected account so \"\n u\"you can log in. To remove this account connect a \"\n u\"different one first. To delete your MDN account please \"\n u'file a bug.')\n\n\nclass KumaAccountAdapter(DefaultAccountAdapter):\n\n def is_open_for_signup(self, request):\n \"\"\"\n We disable the signup with regular accounts as we require Persona\n (for now)\n \"\"\"\n return False\n\n def clean_username(self, username):\n \"\"\"\n When signing up make sure the username isn't already used by\n a different user.\n \"\"\"\n username = super(KumaAccountAdapter, self).clean_username(username)\n if User.objects.filter(username=username).exists():\n raise forms.ValidationError(_(u'The username you entered '\n u'already exists.'))\n return username\n\n def message_templates(self, *names):\n return tuple('messages/%s.txt' % name for name in names)\n\n def add_message(self, request, level, message_template,\n message_context={}, extra_tags='', *args, **kwargs):\n \"\"\"\n Adds an extra \"account\" tag to the success and error messages.\n \"\"\"\n # let's ignore some messages\n if message_template.endswith(self.message_templates('logged_in',\n 'logged_out')):\n return\n\n # promote the \"account_connected\" message to success\n if message_template.endswith(self.message_templates('account_connected')):\n level = messages.SUCCESS\n\n # when a next url is set because of a multi step sign-in\n # (e.g. 
sign-in with github, verified mail is found in Persona\n # social accounts, agree to first log in with Persona to connect\n # instead) we ignore the message \"account connected\" message as\n # it would be misleading\n if 'sociallogin_next_url' in request.session:\n return\n\n # and add an extra tag to the account messages\n extra_tag = 'account'\n if extra_tags:\n extra_tags += ' '\n extra_tags += extra_tag\n\n super(KumaAccountAdapter, self).add_message(request, level,\n message_template,\n message_context,\n extra_tags,\n *args, **kwargs)\n\n\nclass KumaSocialAccountAdapter(DefaultSocialAccountAdapter):\n\n def is_open_for_signup(self, request, sociallogin):\n \"\"\"\n We specifically enable social accounts as a way to signup\n because the default adapter uses the account adpater above\n as the default.\n \"\"\"\n return True\n\n def validate_disconnect(self, account, accounts):\n \"\"\"\n Validate whether or not the socialaccount account can be\n safely disconnected.\n \"\"\"\n if len(accounts) == 1:\n raise forms.ValidationError(REMOVE_MESSAGE %\n {'bug_form_url': REMOVE_BUG_URL})\n","repo_name":"federicoculloca/kuma","sub_path":"kuma/users/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"28427597667","text":"def rajzol(koordinata):\n racs = [['O ' for x in range(3)] for y in range(3)]\n x, y = koordinata\n racs[x][y] = '+ '\n for sor in racs:\n print(''.join(sor))\n\nwhile True:\n try:\n x, y = map(int, input(\"Adja meg a koordinátát (pl.: 0 2): \").split())\n if x in [0, 1, 2] and y in [0, 1, 2]:\n break\n else:\n print(\"A koordinátáknak 0, 1 vagy 2 értékekkel kell rendelkezniük.\")\n except:\n print(\"Érvénytelen bemenet.\")\n\nrajzol((x, y))","repo_name":"kizsi2019/22_10D2","sub_path":"Jakab Máté/python/feladat 2.2.py","file_name":"feladat 2.2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"hu","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"34117630931","text":"import requests\nimport sys\n\nBASE_URL = 'http://localhost:5001'\n\n\ndef novoLivro():\n nome = input('Digite o nome do livro: ')\n return nome\n\n\ndef main():\n print('### Biblioteca de Livros')\n\n while True:\n print('\\nINÍCIO:')\n print('0) Para sair')\n print('1) Para ver o estoque da biblioteca')\n print('2) Para alugar livros')\n escolha = input('Escolha: ')\n\n if escolha == '1':\n while True:\n print('\\nMENU:')\n print('1) Inserir Livro')\n print('2) Listar Livros')\n print('3) Atualiza Livro')\n print('0) Voltar para o início')\n op = input('> ')\n if op == '1':\n livro = novoLivro()\n requests.post(f\"{BASE_URL}/book\", json={'nome': livro})\n elif op == '2':\n livros = requests.get(f\"{BASE_URL}/book\").json()\n\n for livro in livros:\n print('Nome do livro:', livro['nome'])\n elif op == '3':\n livro_antigo = input('Nome do livro antigo: ')\n livro_novo = input('\\nNome do livro novo: ')\n requests.put(\n f\"{BASE_URL}/book\", json={'to_update': livro_antigo,\n 'update': livro_novo})\n elif op == '0':\n break\n else:\n print('Opcao invalida!')\n elif escolha == '2':\n print('1) Ver livros alugados')\n print('2) Alugar livro')\n\n aluguel_escolha = input('Escolha:')\n\n print('\\n')\n if aluguel_escolha == '1':\n alugueis = requests.get(f\"{BASE_URL}/rent\").json()\n\n for aluguel in alugueis:\n print(f\"Cliente: {aluguel['cliente']}\")\n print(f\"Livro: {aluguel['livro']}\\n\")\n elif aluguel_escolha == '2':\n cliente = 
input('Seu nome: ')\n livro = input('Livro para alugar: ')\n\n requests.post(f\"{BASE_URL}/rent\",\n json={'nome': livro, 'cliente': cliente})\n elif escolha == '0':\n sys.exit(0)\n else:\n print('Opção inválida')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matheusinit/python-java-microservices","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3111714363","text":"import logging\nimport os\n\nfrom twisted.web._element import renderer, Element\nfrom twisted.web.template import XMLFile\nfrom vortex.DeferUtil import deferToThreadWrapWithLogger\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdminAuthElement(Element):\n xmlFileName = \"AdminAuthTemplate.xml\"\n loader = XMLFile(os.path.join(os.path.dirname(__file__), xmlFileName))\n\n def __init__(self, failed, failureMsg):\n self._failed = failed\n self._failureMsg = failureMsg\n\n @renderer\n @deferToThreadWrapWithLogger(logger)\n def loginTitle(self, request, tag):\n return tag(b\"Login to Peek Admin\")\n\n @renderer\n @deferToThreadWrapWithLogger(logger)\n def metaDescriptionContent(self, request, tag):\n return tag(content=b\"...\")\n\n @renderer\n def errorPanel(self, request, tag):\n if self._failed:\n if self._failureMsg:\n return tag(b\"Failed to login : %s\" % self._failureMsg.encode(\"utf-8\"))\n return tag(b\"Failed to login\")\n return b\"\"\n","repo_name":"Synerty/peek-logic-service","sub_path":"peek_logic_service/backend/auth/AdminAuthElement.py","file_name":"AdminAuthElement.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"74454836847","text":"import os\nimport pathlib\nimport random\n\nimport flask\nimport flask.wrappers\n\nfrom src import process_model\nfrom src import ui\nfrom src import simulation_engine\n\n\napp = flask.Flask(__name__, template_folder=\"../templates\", static_folder=\"../static\")\napp.config.from_prefixed_env()\nsimulator = simulation_engine.Simulator()\n\nfor _ in range(10):\n simulation = simulator.queue_simulation(\n model_id=process_model.ModelId(\"models/petri_net_1\"),\n simulation_parameters=simulation_engine.SimulationParameters(),\n )\n if random.random() < 0.3:\n simulation = simulator.start_simulation(simulation)\n if random.random() < 0.5:\n simulator.finish_simulation(simulation, simulation_engine.SimulationResult())\n\n\ndef get_file_tree(root_dir: pathlib.Path) -> dict[str, dict[str, bool]]:\n file_tree = {}\n for root, _dirs, files in os.walk(root_dir):\n current_level = file_tree\n path = root.split(os.sep)\n for dir in path:\n if dir not in current_level:\n current_level[dir] = {}\n current_level = current_level[dir]\n for file in files:\n if file.endswith(\".pm\"):\n model_type = process_model.ProcessModelType.from_path(pathlib.Path(root) / file)\n current_level[file] = model_type.name.replace(\"_\", \" \").capitalize()\n return file_tree\n\n\n@app.route(\"/new_model\", methods=[\"POST\"])\ndef new_model() -> flask.Response:\n model_id = \"data/models/\" + flask.request.form[\"model_id\"]\n model_type = process_model.ProcessModelType(flask.request.form[\"model_type\"])\n model_factory = process_model.model_type_to_class(model_type)\n model = model_factory(id=process_model.ModelId(model_id), model_type=model_type)\n model.save(pathlib.Path(model_id))\n\n return flask.redirect(f\"/edit?model_id={model_id}\") # type: 
ignore\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index() -> flask.Response:\n return flask.make_response(\n flask.render_template(\n \"pages/welcome_page.html\",\n file_tree=get_file_tree(pathlib.Path(\"data/models\")),\n model_types=[\n (model_type.name.replace(\"_\", \" \"), model_type.value) for model_type in process_model.ProcessModelType\n ],\n )\n )\n\n\n@app.route(\"/edit\", methods=[\"GET\"])\ndef edit_model() -> flask.Response:\n global simulation_queue\n\n model_id = flask.request.args[\"model_id\"]\n model_type = process_model.ProcessModelType.from_path(pathlib.Path(model_id))\n\n return flask.make_response(\n flask.render_template(\n \"pages/editor_page.html\",\n file_tree=get_file_tree(pathlib.Path(\"data/models\")),\n current_model_id=model_id,\n model_type=model_type.name.replace(\"_\", \" \"),\n toolbar_buttons=ui.get_toolbar_buttons(model_type),\n model_types=[\n (model_type.name.replace(\"_\", \" \"), model_type.value) for model_type in process_model.ProcessModelType\n ],\n simulation_queue=map(\n ui.SimulationQueueListItem.from_simulation,\n simulator.finished_simulations + simulator.running_simulations + simulator.queued_simulations,\n ),\n )\n )\n\n\n@app.route(\"/queue_simulation\", methods=[\"POST\"])\ndef queue_simulation() -> flask.Response:\n model_id = flask.request.form[\"model_id\"]\n simulator.queue_simulation(process_model.ModelId(model_id), simulation_engine.SimulationParameters())\n return flask.make_response(\"\", 200)\n\n\n@app.route(\"/healthz\", methods=[\"GET\"])\ndef healthz() -> flask.Response:\n return flask.make_response(\"OK\\n\", 200)\n\n\n@app.route(\"/favicon.ico\", methods=[\"GET\"])\ndef favicon() -> flask.Response:\n return flask.make_response(\"\", 204)\n\n\n@app.errorhandler(404)\ndef page_not_found(error: Exception | None = None) -> flask.Response:\n print(error)\n return flask.make_response(flask.render_template(\"pages/404_page.html\", error=error), 404)\n","repo_name":"askepen/sea2023","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"11731908135","text":"from cProfile import label\nimport imp\nfrom optparse import Values\nfrom flask import Flask, Response, render_template, stream_with_context, request\nimport db_conn.sqlite3_conn as sqlite3_conn\nfrom datetime import datetime\nimport tzlocal\nimport json\nimport time\nimport logging\nimport sys\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')\nlogger = logging.getLogger(__name__)\n\napp = Flask(__name__)\n\ndef update_readings():\n if request.headers.getlist('X-Forwarded-For'):\n client_ip = request.headers.getlist('X-Forwarded-For')[0]\n else:\n client_ip = request.remote_addr or ''\n try:\n logger.info(\"Client %s connected\", client_ip)\n while True:\n db_conn = sqlite3_conn.create_connection('./data/test.db')\n c722_data = sqlite3_conn.get_last_data(db_conn, 1, 'C722')\n c722_time = datetime.fromtimestamp(\n c722_data[0][0], tzlocal.get_localzone()\n ).strftime('%H:%M:%S')\n c722_val = c722_data[0][2]\n labred_data = sqlite3_conn.get_last_data(db_conn, 1, 'LabRed')\n labred_time = datetime.fromtimestamp(\n labred_data[0][0], tzlocal.get_localzone()\n ).strftime('%H:%M:%S')\n labred_val = labred_data[0][2]\n print('C722time: {}, C722value: {}'.format(c722_time, c722_val))\n print('LabRedtime: {}, LabRedvalue: {}'.format(labred_time, labred_val))\n db_conn.close()\n 
json_data = json.dumps(\n {\n 'C722time': c722_time, \n 'C722value': c722_val,\n 'C722title': 'C722 CO2 level [ppm]',\n 'C722color': 'rgb(0, 128, 128)',\n 'LabRedtime': labred_time, \n 'LabRedvalue': labred_val,\n 'LabRedtitle': 'LabRed CO2 level [ppm]',\n 'LabRedcolor': 'rgb(255, 127, 127)'\n }\n )\n yield f'data:{json_data}\\n\\n'\n time.sleep(30)\n except GeneratorExit:\n logger.info(\"Client %s disconnected\", client_ip)\n\n@app.route('/')\ndef home():\n return render_template('graphs.html')\n\n@app.route('/chart-data')\ndef chart_data():\n response = Response(\n stream_with_context(update_readings()), mimetype = 'text/event-stream'\n )\n response.headers['Cache-Control'] = 'no-cache'\n response.headers['X-Accel-Buffering'] = 'no'\n return response\n\nif __name__ == '__main__':\n print('Start server...')\n app.run(host = '0.0.0.0', threaded=True, debug = True)\n","repo_name":"Ryuuba/dynamic-plot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13567130961","text":"#!/usr/bin/env python3\n\"\"\" Re-written to scrape paragraph tags on a website\n\"\"\"\n\nimport click\nimport requests_html\n\n\n@click.command()\n@click.option(\"--url\", prompt=True, help=\"The URL of the website to check\")\ndef get_paragraph(url):\n \"\"\"Get the content into my memory\"\"\"\n page = requests_html.HTMLSession().get(url)\n paragraphs = page.html.find(\"p\")\n\n with open(\"/tmp/para.txt\", \"a\") as f:\n for p in paragraphs:\n f.write(f\"\\n {p.text}\")\n\n\nif __name__ == \"__main__\":\n get_paragraph()\n","repo_name":"WnndGws/scripts","sub_path":"python/paragraphscraper/paragraph_scraper.py","file_name":"paragraph_scraper.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13938267748","text":"import os\nimport subprocess\nimport json\nfrom io import StringIO\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom pylint.lint import Run\nfrom pylint.reporters.json_reporter import JSONReporter\nfrom tqdm import tqdm\n\n\ndef edulint_analyze(file_path):\n result = subprocess.run(['py', '-m', 'edulint', str(file_path)], text=True, capture_output=True)\n return [msg[msg.rfind(':') + 2:msg.find('[')].replace(' ', '_') for msg in result.stdout.split('\\n') if len(msg) > 0]\n\n\ndef pylint_analyze(file_path):\n pylint_output = StringIO()\n reporter = JSONReporter(pylint_output)\n result = Run([str(file_path)], reporter=reporter, exit=False)\n return [error.symbol for error in result.linter.reporter.messages]\n\n\ndef flake8_analyze(file_path):\n raise RuntimeError('Not implemented!')\n result = subprocess.run(['flake8', '-'], input=code_string, text=True, capture_output=True)\n return result.stdout.split('\\n')\n\n\ndef call_analyze(file_path, mode):\n if mode == 'edulint':\n return edulint_analyze(file_path)\n elif mode == 'pylint':\n return pylint_analyze(file_path)\n elif mode == 'flake8':\n return flake8_analyze(file_path)\n \n raise RuntimeError('Mode not recognized!')\n\n\ndef analyze_string(code_string, mode='edulint', file_path='temp_code.py'):\n with open(file_path, 'w') as f:\n f.write(code_string)\n\n result = call_analyze(file_path, mode)\n\n os.remove(file_path)\n return result\n\n\ndef analyze_strings(code_strings, mode='edulint', result_path='results.json'):\n if mode != 'edulint':\n raise RuntimeError('Not implemented!')\n \n print('Creating code 
files...')\n dir_path = Path('temp_folder/')\n dir_path.mkdir(parents=True, exist_ok=True)\n for i, code_string in tqdm(enumerate(code_strings)):\n file_path = dir_path / 'temp_code_{}.py'.format(i)\n with open(file_path, mode='w') as f:\n f.write(code_string)\n print('Done!')\n \n print('Processing files...')\n temp_results_path = dir_path / 'results.txt'\n max_i = i\n with open(temp_results_path, mode='w') as temp_results:\n for i in tqdm(range(max_i + 1)):\n file_path = dir_path / 'temp_code_{}.py'.format(i)\n temp_results.write(json.dumps(call_analyze(file_path, mode)))\n temp_results.write(';')\n print('Done!')\n\n print('Cleaning up...')\n with open(temp_results_path, mode='r') as temp_results:\n result = list(map(lambda x: [msg[:msg.find('_')] for msg in json.loads(x + ']')], temp_results.read().split('];')[:-1]))\n json.dump(result, open(result_path, 'w'))\n rmtree(dir_path)\n print('Done!')\n\n print('All finished!')\n\n return result","repo_name":"PavelNedelnik/umimeto-cheating-detection","sub_path":"src/linting.py","file_name":"linting.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72529799406","text":"\"\"\"\nB-cos DenseNet models\n\nModified from https://github.com/pytorch/vision/blob/0504df5ddf9431909130e7788faf05446bb8/torchvision/models/densenet.py\n\nCIFAR10 modifications from\nhttps://github.com/gpleiss/efficient_densenet_pytorch/blob/master/models/densenet.py\n\"\"\"\nimport math\nimport re\nfrom collections import OrderedDict\nfrom typing import Any, Callable, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom torch import Tensor\nfrom torch.hub import load_state_dict_from_url\n\nfrom bcos.common import BcosUtilMixin\nfrom bcos.modules import BcosConv2d, LogitLayer, norms\n\n__all__ = [\n \"BcosDenseNet\",\n \"densenet121\",\n \"densenet169\",\n \"densenet201\",\n \"densenet161\",\n # c10\n \"cifar10_densenet250\",\n \"cifar10_densenet100\",\n \"cifar10_densenet190\",\n]\n\n\nDEFAULT_NORM_LAYER = norms.NoBias(norms.DetachablePositionNorm2d)\nDEFAULT_CONV_LAYER = BcosConv2d\n\n\nclass _DenseLayer(nn.Module):\n def __init__(\n self,\n num_input_features: int,\n growth_rate: int,\n bn_size: int,\n drop_rate: float,\n memory_efficient: bool = False,\n norm_layer: Callable[..., nn.Module] = DEFAULT_NORM_LAYER,\n conv_layer: Callable[..., nn.Module] = DEFAULT_CONV_LAYER,\n ) -> None:\n super(_DenseLayer, self).__init__()\n # Diff to torchvision: Removed ReLU and replaced BatchNorm with norm_layer\n self.norm1 = norm_layer(num_input_features)\n # Diff to torchvision: Replace Conv2d with ProjectionConv2d\n self.conv1 = conv_layer(\n num_input_features,\n bn_size * growth_rate,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n self.norm2 = norm_layer(bn_size * growth_rate)\n self.conv2 = conv_layer(\n bn_size * growth_rate,\n growth_rate,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n # Diff End\n # Diff End\n self.drop_rate = float(drop_rate)\n self.memory_efficient = memory_efficient\n\n def bn_function(self, inputs: List[Tensor]) -> Tensor:\n concated_features = torch.cat(inputs, 1)\n # Diff to torchvision: Deleted relu\n bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484\n # Diff End\n return bottleneck_output\n\n # todo: rewrite when torchscript supports any\n def any_requires_grad(self, input: List[Tensor]) -> bool:\n for tensor in input:\n if 
tensor.requires_grad:\n return True\n return False\n\n @torch.jit.unused # noqa: T484\n def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:\n def closure(*inputs):\n return self.bn_function(inputs)\n\n return cp.checkpoint(closure, *input)\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: List[Tensor]) -> Tensor:\n pass\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n pass\n\n # torchscript does not yet support *args, so we overload method\n # allowing it to take either a List[Tensor] or single Tensor\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n if isinstance(input, Tensor):\n prev_features = [input]\n else:\n prev_features = input\n\n if self.memory_efficient and self.any_requires_grad(prev_features):\n if torch.jit.is_scripting():\n raise Exception(\"Memory Efficient not supported in JIT\")\n\n bottleneck_output = self.call_checkpoint_bottleneck(prev_features)\n else:\n bottleneck_output = self.bn_function(prev_features)\n # Diff to torchvision: Deleted relu\n new_features = self.conv2(self.norm2(bottleneck_output))\n # Diff End\n\n if self.drop_rate > 0:\n new_features = F.dropout(\n new_features, p=self.drop_rate, training=self.training\n )\n return new_features\n\n\nclass _DenseBlock(nn.ModuleDict):\n _version = 2\n\n def __init__(\n self,\n num_layers: int,\n num_input_features: int,\n bn_size: int,\n growth_rate: int,\n drop_rate: float,\n memory_efficient: bool = False,\n norm_layer: Callable[..., nn.Module] = DEFAULT_NORM_LAYER,\n conv_layer: Callable[..., nn.Module] = DEFAULT_CONV_LAYER,\n ) -> None:\n super(_DenseBlock, self).__init__()\n for i in range(num_layers):\n layer = _DenseLayer(\n num_input_features + i * growth_rate,\n growth_rate=growth_rate,\n bn_size=bn_size,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n norm_layer=norm_layer,\n conv_layer=conv_layer,\n )\n self.add_module(\"denselayer%d\" % (i + 1), layer)\n\n def forward(self, init_features: Tensor) -> Tensor:\n features = [init_features]\n for name, layer in self.items():\n new_features = layer(features)\n features.append(new_features)\n return torch.cat(features, 1)\n\n\nclass _Transition(nn.Sequential):\n def __init__(\n self,\n num_input_features: int,\n num_output_features: int,\n norm_layer: Callable[..., nn.Module] = DEFAULT_NORM_LAYER,\n conv_layer: Callable[..., nn.Module] = DEFAULT_CONV_LAYER,\n ) -> None:\n super(_Transition, self).__init__()\n # Diff to torchvision: Deleted relu and changed conv to ProjectionConv2d\n self.add_module(\"norm\", norm_layer(num_input_features))\n self.conv = conv_layer(\n num_input_features,\n num_output_features,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n # Diff End\n\n self.add_module(\"pool\", nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass BcosDenseNet(BcosUtilMixin, nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" `_.\n\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n memory_efficient (bool) - If True, uses checkpointing. 
Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" `_.\n \"\"\"\n\n def __init__(\n self,\n growth_rate: int = 32,\n block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),\n num_init_features: int = 64,\n bn_size: int = 4,\n drop_rate: float = 0,\n num_classes: int = 1000,\n in_chans: int = 6,\n memory_efficient: bool = False,\n norm_layer: Callable[..., nn.Module] = DEFAULT_NORM_LAYER,\n conv_layer: Callable[..., nn.Module] = DEFAULT_CONV_LAYER,\n small_inputs: bool = False, # True for 32x32 images from gpleiss' impl\n logit_bias: Optional[float] = None,\n logit_temperature: Optional[float] = None,\n ) -> None:\n super(BcosDenseNet, self).__init__()\n\n # First convolution\n # Diff to torchvision: Deleted ReLU, changed Conv2d for ProjectionConv2d and\n # MaxPool for AvgPool\n if small_inputs:\n self.features = nn.Sequential(\n OrderedDict(\n [\n (\n \"conv0\",\n conv_layer(\n in_chans,\n num_init_features,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n )\n ]\n )\n )\n else:\n self.features = nn.Sequential(\n OrderedDict(\n [\n (\n \"conv0\",\n conv_layer(\n in_chans,\n num_init_features,\n kernel_size=7,\n stride=2,\n padding=3,\n ),\n ),\n (\"norm0\", norm_layer(num_init_features)),\n (\"pool0\", nn.AvgPool2d(kernel_size=3, stride=2, padding=1)),\n ]\n )\n )\n # Diff End\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(\n num_layers=num_layers,\n num_input_features=num_features,\n bn_size=bn_size,\n growth_rate=growth_rate,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n norm_layer=norm_layer,\n conv_layer=conv_layer,\n )\n self.features.add_module(\"denseblock%d\" % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(\n num_input_features=num_features,\n num_output_features=num_features // 2,\n norm_layer=norm_layer,\n conv_layer=conv_layer,\n )\n self.features.add_module(\"transition%d\" % (i + 1), trans)\n num_features = num_features // 2\n\n # Final norm layer\n self.features.add_module(\"norm5\", norm_layer(num_features))\n\n # Diff to torchvision: changed Linear layer to BcosConv (conv classifier)\n self.classifier = conv_layer(num_features, num_classes, kernel_size=1)\n self.num_classes = num_classes\n self.logit_layer = LogitLayer(\n logit_temperature=logit_temperature,\n logit_bias=logit_bias or -math.log(num_classes - 1),\n )\n # Diff End\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x: Tensor) -> Tensor:\n # Diff to torchvision: Deleted relu\n features = self.features(x)\n # out = F.relu(features, inplace=True)\n out = self.classifier(features)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.logit_layer(out)\n # Diff End\n return out\n\n def get_classifier(self) -> nn.Module:\n \"\"\"Returns the classifier part of the model. Note this comes before global pooling.\"\"\"\n return self.classifier\n\n def get_feature_extractor(self) -> nn.Module:\n \"\"\"Returns the feature extractor part of the model. 
Without global pooling.\"\"\"\n return self.features\n\n\ndef _load_state_dict(model: nn.Module, model_url: str, progress: bool) -> None:\n # '.'s are no longer allowed in module names, but previous _DenseLayer\n # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.\n # They are also in the checkpoints in model_urls. This pattern is used\n # to find such keys.\n pattern = re.compile(\n r\"^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$\"\n )\n\n state_dict = load_state_dict_from_url(\n model_url, map_location=\"cpu\", progress=progress, check_hash=True\n )\n for key in list(state_dict.keys()):\n res = pattern.match(key)\n if res:\n new_key = res.group(1) + res.group(2)\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n model.load_state_dict(state_dict)\n\n\ndef _densenet(\n arch: str,\n growth_rate: int,\n block_config: Union[Tuple[int, int, int], Tuple[int, int, int, int]],\n num_init_features: int,\n pretrained: bool,\n progress: bool,\n **kwargs: Any,\n) -> BcosDenseNet:\n model = BcosDenseNet(growth_rate, block_config, num_init_features, **kwargs)\n if pretrained:\n raise ValueError(\n \"If you want to load pretrained weights, then please use the entrypoints in \"\n \"bcos.pretrained or bcos.model.pretrained instead.\"\n )\n return model\n\n\ndef densenet121(\n pretrained: bool = False,\n progress: bool = True,\n num_init_features=64,\n growth_rate=32,\n **kwargs: Any,\n) -> BcosDenseNet:\n return _densenet(\n \"densenet121\",\n growth_rate,\n (6, 12, 24, 16),\n num_init_features,\n pretrained,\n progress,\n **kwargs,\n )\n\n\ndef densenet161(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n return _densenet(\n \"densenet161\", 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs\n )\n\n\ndef densenet169(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n return _densenet(\n \"densenet169\", 32, (6, 12, 32, 32), 64, pretrained, progress, **kwargs\n )\n\n\ndef densenet201(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n return _densenet(\n \"densenet201\", 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs\n )\n\n\n# cifar helpers\ndef _get_cifar_block_config(depth: int) -> Tuple[int, int, int]:\n if (depth - 4) % 3:\n raise Exception(\"Invalid depth!\")\n return ((depth - 4) // 6,) * 3\n\n\ndef _update_default_cifar(kwargs: dict) -> None:\n kwargs.setdefault(\"num_classes\", 10)\n kwargs.setdefault(\"small_inputs\", True)\n\n\ndef cifar10_densenet52(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n L = 52\n k = 12\n block_config = _get_cifar_block_config(L)\n _update_default_cifar(kwargs)\n return _densenet(\n f\"cifar10_densenet{L}\", k, block_config, k * 2, pretrained, progress, **kwargs\n )\n\n\n# cifar densenet configs from Sec. 
3#Implementation detail from the paper\ndef cifar10_densenet100(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n L = 100\n k = 12\n block_config = _get_cifar_block_config(L)\n _update_default_cifar(kwargs)\n return _densenet(\n f\"cifar10_densenet{L}\", k, block_config, k * 2, pretrained, progress, **kwargs\n )\n\n\ndef cifar10_densenet250(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n L = 250\n k = 24\n block_config = _get_cifar_block_config(L)\n _update_default_cifar(kwargs)\n return _densenet(\n f\"cifar10_densenet{L}\", k, block_config, k * 2, pretrained, progress, **kwargs\n )\n\n\ndef cifar10_densenet190(\n pretrained: bool = False, progress: bool = True, **kwargs: Any\n) -> BcosDenseNet:\n L = 190\n k = 40\n block_config = _get_cifar_block_config(L)\n _update_default_cifar(kwargs)\n return _densenet(\n f\"cifar10_densenet{L}\", k, block_config, k * 2, pretrained, progress, **kwargs\n )\n","repo_name":"sidgairo18/bcos-standalone","sub_path":"bcos/models/densenet.py","file_name":"densenet.py","file_ext":"py","file_size_in_byte":15823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20349242439","text":"import re\n\n\ndef get_file(active_user):\n with open(f\"./users/{active_user}.txt\", \"r\", encoding=\"utf8\") as user: # Открытие файла активного пользователя для чтение\n return user.readlines() # Возврат всех данных пользователя\n\n\ndef set_file(active_user, user_data):\n with open(f\"./users/{active_user}.txt\", \"w\", encoding=\"utf8\") as user: # Открытие файла активного пользователя для перезаписи\n i = 0 # Счётчик для цикла\n while i < len(user_data): # Цикл перебора всех данных пользователя\n user.write(user_data[i]) # Построчная запись данных в файл\n i += 1 # Увеличиваем счётчик\n\n\ndef nationality(active_user):\n nationality = input(\"Кто вы по национальности? 
\")\n user_data = get_file(active_user) # Получение всех данных пользователя\n\n user_data[5] = f\"{nationality}\\n\" # Перезапись данных о национальности пользователя\n\n set_file(active_user, user_data) # Сохранение данных в файл пользователя\n return \"Национальность добавлена!\" # Возвращаем успешный результат\n\n\ndef profession(active_user):\n profession = input(\"Укажите вашу профессию, если их много, то укажите их через запятую: \")\n profession = profession.split(\", \") # Форматируем полученных от пользователя данные, переводом в список\n user_data = get_file(active_user) # Получение всех данных пользователя\n\n user_list = re.sub(r'[\\n\\[\\]\\' ]', '', user_data[6]).split(',') # Удаляем из данных в файле лишние символы и переводим в список\n\n user_list.extend(profession) # Метод exdend добавляет элементы массива в другой массив\n user_list = list(filter(None, set(user_list))) # set() перевод массива в набор(в нём нет повторящихся элементов), после list() обратно переводит в список\n user_data[6] = f\"{user_list}\\n\" # Перезапись данных о професиях пользователя\n\n set_file(active_user, user_data) # Сохранение данных в файл пользователя\n return \"Профессии добавлены!\" # Возвращаем успешный результат\n\n\ndef hobby(active_user):\n hobby = input(\"Укажите ваше хобби, если их много, то укажите их через запятую: \")\n hobby = hobby.split(\", \") # Форматируем полученных от пользователя данные, переводом в список\n user_data = get_file(active_user) # Получение всех данных пользователя\n\n user_list = re.sub(r'[\\n\\[\\]\\' ]', '', user_data[7]).split(',') # Удаляем из данных в файле лишние символы и переводим в список\n\n user_list.extend(hobby) # Метод exdend добавляет элементы массива в другой массив\n user_list = list(filter(None, set(user_list))) # set() перевод массива в набор(в нём нет повторящихся элементов), после list() обратно переводит в список\n user_data[7] = f\"{user_list}\\n\" # Перезапись данных о хобби пользователя\n\n set_file(active_user, user_data) # Сохранение данных в файл пользователя\n return \"Хобби добавлены!\" # Возвращаем успешный результат\n","repo_name":"skr1pmen/misic_box","sub_path":"edit_user.py","file_name":"edit_user.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23435188916","text":"\n\n\ndef makenet(n): #n is the dimension\n\tif n<2: return [ (i/10.0,) for i in range(11)]\n\telse: return [(i/10.0,)+v for i in range(11) for v in makenet(n-1)]\n\ndef makeshell(n): #n is the dimension of the shell\n\tnetn = makenet(n)\n\treturn [a[0:i]+ (1,) + a[i:n] for i in range(n+1) for a in netn]\n\t\n\n\n\n#print(makenet(3))\n'''\nN = 2\nnetN = makenet(N)\nshellN = [a[0:i]+ (1,) + a[i:N] for i in range(N+1) for a in netN]\n\nfor item in shellN:\n\tprint(item)\n\nprint(N, len(netN), len(shellN))\n'''\n\nN = 3\nnetN = makenet(N)\nshellN = makeshell(N)\n\n\nfor vec in shellN:\n\tprint(tuple([i*2 for i in vec]))\n\nprint(N, len(netN), len(shellN))\n\n\n\n\n","repo_name":"sanacoryf/ASNorm","sub_path":"NormStuff/testnet.py","file_name":"testnet.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11574873917","text":"class Solution:\n def maxProfit(self, prices, fee):\n L = len(prices)\n dp = [[-float('inf') for _ in range(2)] for _ in range(L)]\n dp[0][0] = -prices[0]\n dp[0][1] = 0 \n for i in range(1, L):\n dp[i][0] = max(dp[i-1][0], dp[i-1][1] - 
prices[i])\n dp[i][1] = max(dp[i-1][0] + prices[i] - fee, dp[i-1][1])\n \n \n return max(dp[L-1])\n\n\ndef test_ex1():\n prices = [1,3,2,8,4,9]\n fee = 2\n \n ans = 8\n sol = Solution()\n assert ans == sol.maxProfit(prices, fee)\n\ndef test_ex2():\n prices = [1,3,7,5,10,3]\n fee = 3\n \n ans = 6\n sol = Solution()\n assert ans == sol.maxProfit(prices, fee)","repo_name":"rennyv/leetcode","sub_path":"q0714.best.time.to.buy.and.sell.stock.with.transaction.fee/py3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15424242483","text":"from reader import ReaderFactory\nfrom tfidf import TFIDF\nfrom requests_toolbelt.multipart import decoder\n\n\nmessages = {\n 'good': 'Nice summary! You sure know your stuff!',\n 'okay': \"You're on your way to success, but you could use some more studying!\",\n 'bad': 'Oof, you should really consider hitting the books....'\n}\n\n\ndef lambda_handler(event, context):\n reader_factory = ReaderFactory()\n notes = []\n\n if event['files']:\n for file in event['files']:\n # read each individual file\n reader = reader_factory.get(file)\n notes.append(reader.read_text())\n\n if event['notes']:\n # add any additional notes if included\n notes.append(event['notes'])\n\n # consider all uploaded content to be part of the same set of notes\n notes = ' '.join(notes)\n\n # Get on match using tf-idf\n tfidf = TFIDF()\n score = tfidf.find_similarity([notes, event['summary']])\n level = None\n\n # get level from score, which will determine the message\n if score < 0.06:\n level = 'bad'\n elif score < 0.14:\n level = 'okay'\n else:\n level = 'good'\n\n return {\n 'statusCode': 200,\n 'body': {\n 'score': score,\n 'message': messages[level]\n }\n }\n\ndef lambda_new(event, context):\n content_type_header = event['headers']['content-type'] + '; boundary=' + event['body'].split('\\r')[0].split('-')[-1]\n # content_type_header = event['headers']['content-type'] + '; boundary=----WebKitFormBoundaryhkPBzaCCH5WTm3qe'\n\n body = event['body'].encode()\n\n response = ''\n for part in decoder.MultipartDecoder(body, content_type_header).parts:\n response += part.text + \"\\n\"\n\n return {\n 'statusCode': 200,\n 'body': response\n }\n","repo_name":"jtang6598/notes-summary-scorer","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3980240043","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_guest_disk\nshort_description: Manage disks related to virtual machine in given vCenter infrastructure\ndescription:\n - This module can be used to add, remove and update disks belonging to given virtual machine.\n - All parameters and VMware object names are case sensitive.\n - This module is destructive in nature, please read documentation carefully before proceeding.\n - Be careful while removing disk specified as this may lead to data loss.\nauthor:\n - Abhijeet Kasurde (@Akasurde) \noptions:\n name:\n description:\n - Name of the virtual machine.\n - This is a required parameter, if parameter O(uuid) or O(moid) is not supplied.\n type: str\n uuid:\n description:\n - UUID of the instance to gather facts if known, this is VMware's unique identifier.\n - This is a required parameter, if parameter O(name) or O(moid) is not 
supplied.\n type: str\n moid:\n description:\n - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.\n - This is required if O(name) or O(uuid) is not supplied.\n type: str\n folder:\n description:\n - Destination folder, absolute or relative path to find an existing guest.\n - This is a required parameter, only if multiple VMs are found with same name.\n - The folder should include the datacenter. ESX's datacenter is ha-datacenter\n - 'Examples:'\n - ' folder: /ha-datacenter/vm'\n - ' folder: ha-datacenter/vm'\n - ' folder: /datacenter1/vm'\n - ' folder: datacenter1/vm'\n - ' folder: /datacenter1/vm/folder1'\n - ' folder: datacenter1/vm/folder1'\n - ' folder: /folder1/datacenter1/vm'\n - ' folder: folder1/datacenter1/vm'\n - ' folder: /folder1/datacenter1/vm/folder2'\n type: str\n datacenter:\n description:\n - The datacenter name to which virtual machine belongs to.\n required: true\n type: str\n use_instance_uuid:\n description:\n - Whether to use the VMware instance UUID rather than the BIOS UUID.\n default: false\n type: bool\n disk:\n description:\n - A list of disks to add or remove.\n - The virtual disk related information is provided using this list.\n - All values and parameters are case sensitive.\n suboptions:\n size:\n description:\n - Disk storage size.\n - If size specified then unit must be specified. There is no space allowed in between size number and unit.\n - Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.\n type: str\n size_kb:\n description: Disk storage size in kb.\n type: int\n size_mb:\n description: Disk storage size in mb.\n type: int\n size_gb:\n description: Disk storage size in gb.\n type: int\n size_tb:\n description: Disk storage size in tb.\n type: int\n type:\n description:\n - The type of disk, if not specified then use V(thick) type for new disk, no eagerzero.\n type: str\n choices: ['thin', 'eagerzeroedthick', 'thick', 'rdm', 'vpmemdisk']\n disk_mode:\n description:\n - Type of disk mode. If not specified then use V(persistent) mode for new disk.\n - If set to V(persistent) mode, changes are immediately and permanently written to the virtual disk.\n - If set to V(independent_persistent) mode, same as persistent, but not affected by snapshots.\n - If set to V('independent_nonpersistent) mode, changes to virtual disk are made to a redo log and discarded\n at power off, but not affected by snapshots.\n - Not applicable when disk O(disk.type=vpmemdisk).\n type: str\n choices: ['persistent', 'independent_persistent', 'independent_nonpersistent']\n rdm_path:\n description:\n - Path of LUN for Raw Device Mapping required for O(disk.type=rdm).\n - Only valid if O(disk.type=rdm).\n type: str\n cluster_disk:\n description:\n - This value allows for the sharing of an RDM between two machines.\n - The primary machine holding the RDM uses the default V(false).\n - The secondary machine holding the RDM uses V(true).\n type: bool\n default: false\n compatibility_mode:\n description: Compatibility mode for raw devices. 
Required when O(disk.type=rdm).\n type: str\n choices: ['physicalMode','virtualMode']\n sharing:\n description:\n - The sharing mode of the virtual disk.\n - Setting sharing means that multiple virtual machines can write to the virtual disk.\n - Sharing can only be set if O(disk.type=eagerzeroedthick) or O(disk.type=rdm).\n type: bool\n default: false\n datastore:\n description:\n - Name of datastore or datastore cluster to be used for the disk.\n - Not applicable when disk O(disk.type=vpmemdisk).\n type: str\n autoselect_datastore:\n description:\n - Select the less used datastore. Specify only if O(disk.datastore) is not specified.\n - Not applicable when disk O(disk.type=vpmemdisk).\n type: bool\n scsi_controller:\n description:\n - SCSI controller number. Only 4 SCSI controllers are allowed per VM.\n type: int\n choices: [0, 1, 2, 3]\n bus_sharing:\n description:\n - Only functions with Paravirtual SCSI Controller.\n - Allows for the sharing of the scsi bus between two virtual machines.\n type: str\n choices: ['noSharing', 'physicalSharing', 'virtualSharing']\n default: 'noSharing'\n unit_number:\n description:\n - Disk Unit Number.\n - Valid value range from 0 to 15, except 7 for SCSI Controller.\n - Valid value range from 0 to 64, except 7 for Paravirtual SCSI Controller on Virtual Hardware version 14 or higher.\n - Valid value range from 0 to 29 for SATA controller.\n - Valid value range from 0 to 14 for NVME controller.\n - Valid value range from 0 to 1 for IDE controller.\n type: int\n required: true\n scsi_type:\n description:\n - Type of SCSI controller. This value is required only for the first occurrence of SCSI Controller.\n - This value is ignored, if SCSI Controller is already present or O(disk.state=absent).\n type: str\n choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual']\n destroy:\n description: If O(disk.state=absent), make sure the disk file is deleted from the datastore.\n type: bool\n default: true\n filename:\n description:\n - Existing disk image to be used. 
Filename must already exist on the datastore.\n - Specify filename string in C([datastore_name] path/to/file.vmdk) format.\n - Not applicable when disk O(disk.type=vpmemdisk).\n type: str\n state:\n description:\n - State of disk.\n - If set to V(absent), disk will be removed permanently from virtual machine configuration and from VMware storage.\n - If set to V(present), disk will be added if not present at given Controller and Unit Number.\n - or disk exists with different size, disk size is increased, reducing disk size is not allowed.\n type: str\n choices: ['present', 'absent']\n default: 'present'\n controller_type:\n description:\n - This parameter is added for managing disks attaching other types of controllers, e.g., SATA or NVMe.\n - If either O(disk.controller_type) or O(disk.scsi_type) is not specified, then use V(paravirtual) type.\n type: str\n choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual', 'sata', 'nvme', 'ide']\n controller_number:\n description:\n - This parameter is used with O(disk.controller_type) for specifying controller bus number.\n - For O(disk.controller_type=ide), valid value is 0 or 1.\n type: int\n choices: [0, 1, 2, 3]\n iolimit:\n description:\n - Section specifies the shares and limit for storage I/O resource.\n - Not applicable when O(disk.type=vpmemdisk).\n suboptions:\n limit:\n description: Section specifies values for limit where the utilization of a virtual machine will not exceed, even if there are available resources.\n type: int\n shares:\n description: Specifies different types of shares user can add for the given disk.\n suboptions:\n level:\n description: Specifies different level for the shares section.\n type: str\n choices: ['low', 'normal', 'high', 'custom']\n level_value:\n description: Custom value when O(disk.iolimit.shares.level=custom).\n type: int\n type: dict\n type: dict\n shares:\n description:\n - Section for iolimit section tells about what are all different types of shares user can add for disk.\n - Not applicable when disk O(disk.type=vpmemdisk).\n suboptions:\n level:\n description: Tells about different level for the shares section.\n type: str\n choices: ['low', 'normal', 'high', 'custom']\n level_value:\n description: Custom value when O(disk.shares.level=custom).\n type: int\n type: dict\n default: []\n type: list\n elements: dict\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n'''\n\nEXAMPLES = r'''\n- name: Add disks to virtual machine using UUID\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n uuid: 421e4592-c069-924d-ce20-7e7533fab926\n disk:\n - size_mb: 10\n type: thin\n datastore: datacluster0\n state: present\n scsi_controller: 1\n unit_number: 1\n scsi_type: 'paravirtual'\n disk_mode: 'persistent'\n - size_gb: 10\n type: eagerzeroedthick\n state: present\n autoselect_datastore: true\n scsi_controller: 2\n scsi_type: 'buslogic'\n unit_number: 12\n disk_mode: 'independent_persistent'\n - size: 10Gb\n type: eagerzeroedthick\n state: present\n autoselect_datastore: true\n scsi_controller: 2\n scsi_type: 'buslogic'\n unit_number: 1\n disk_mode: 'independent_nonpersistent'\n - filename: \"[datastore1] path/to/existing/disk.vmdk\"\n delegate_to: localhost\n register: disk_facts\n\n- name: Add disks with specified shares to the virtual machine\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ 
vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n disk:\n - size_gb: 1\n type: thin\n datastore: datacluster0\n state: present\n scsi_controller: 1\n unit_number: 1\n disk_mode: 'independent_persistent'\n shares:\n level: custom\n level_value: 1300\n delegate_to: localhost\n register: test_custom_shares\n\n- name: Add physical raw device mapping to virtual machine using name\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n name: \"Test_VM\"\n disk:\n - type: rdm\n state: present\n scsi_controller: 1\n unit_number: 5\n rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453\n compatibility_mode: 'physicalMode'\n\n- name: Add virtual raw device mapping to virtual machine using name and virtual mode\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n name: \"Test_VM\"\n disk:\n - type: rdm\n state: present\n scsi_controller: 1\n unit_number: 5\n rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453\n compatibility_mode: 'virtualMode'\n disk_mode: 'persistent'\n\n- name: Add raw device mapping to virtual machine with Physical bus sharing\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n name: \"Test_VM\"\n disk:\n - type: rdm\n state: present\n scsi_controller: 1\n unit_number: 5\n rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453\n compatibility_mode: 'virtualMode'\n disk_mode: 'persistent'\n bus_sharing: physicalSharing\n\n- name: Add raw device mapping to virtual machine with Physical bus sharing and clustered disk\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n name: \"Test_VM\"\n disk:\n - type: rdm\n state: present\n scsi_controller: 1\n unit_number: 5\n compatibility_mode: 'virtualMode'\n disk_mode: 'persistent'\n bus_sharing: physicalSharing\n filename: \"[datastore1] path/to/rdm/disk-marker.vmdk\"\n\n- name: create new disk with custom IO limits and shares in IO Limits\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n disk:\n - size_gb: 1\n type: thin\n datastore: datacluster0\n state: present\n scsi_controller: 1\n unit_number: 1\n disk_mode: 'independent_persistent'\n iolimit:\n limit: 1506\n shares:\n level: custom\n level_value: 1305\n delegate_to: localhost\n register: test_custom_IoLimit_shares\n\n- name: Remove disks from virtual machine using name\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n name: VM_225\n disk:\n - state: absent\n scsi_controller: 1\n unit_number: 1\n delegate_to: localhost\n register: disk_facts\n\n- name: Remove disk from virtual machine using moid\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: 
\"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n moid: vm-42\n disk:\n - state: absent\n scsi_controller: 1\n unit_number: 1\n delegate_to: localhost\n register: disk_facts\n\n- name: Remove disk from virtual machine but keep the VMDK file on the datastore\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n name: VM_225\n disk:\n - state: absent\n scsi_controller: 1\n unit_number: 2\n destroy: false\n delegate_to: localhost\n register: disk_facts\n\n- name: Add disks to virtual machine using UUID to SATA and NVMe controller\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n uuid: 421e4592-c069-924d-ce20-7e7533fab926\n disk:\n - size_mb: 256\n type: thin\n datastore: datacluster0\n state: present\n controller_type: sata\n controller_number: 1\n unit_number: 1\n disk_mode: 'persistent'\n - size_gb: 1\n state: present\n autoselect_datastore: true\n controller_type: nvme\n controller_number: 2\n unit_number: 3\n disk_mode: 'independent_persistent'\n delegate_to: localhost\n register: disk_facts\n\n- name: Add a new vPMem disk to virtual machine to SATA controller\n community.vmware.vmware_guest_disk:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n validate_certs: false\n name: VM_226\n disk:\n - type: vpmemdisk\n size_gb: 1\n state: present\n controller_type: sata\n controller_number: 1\n unit_number: 2\n delegate_to: localhost\n register: disk_facts\n'''\n\nRETURN = r'''\ndisk_data:\n description: metadata about the virtual machine's disks after managing them\n returned: always\n type: dict\n sample: {\n \"0\": {\n \"backing_datastore\": \"datastore2\",\n \"backing_disk_mode\": \"persistent\",\n \"backing_eagerlyscrub\": false,\n \"backing_filename\": \"[datastore2] VM_225/VM_225.vmdk\",\n \"backing_thinprovisioned\": false,\n \"backing_writethrough\": false,\n \"backing_uuid\": \"421e4592-c069-924d-ce20-7e7533fab926\",\n \"capacity_in_bytes\": 10485760,\n \"capacity_in_kb\": 10240,\n \"controller_key\": 1000,\n \"key\": 2000,\n \"label\": \"Hard disk 1\",\n \"summary\": \"10,240 KB\",\n \"unit_number\": 0\n },\n }\ndisk_changes:\n description: result of each task, key is the 0-based index with the same sequence in which the tasks were defined\n returned: always\n type: dict\n sample: {\n \"0\": \"Disk deleted.\",\n \"1\": \"Disk created.\"\n }\n'''\n\nimport re\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\nfrom random import randint\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, \\\n wait_for_task, find_obj, get_all_objs, get_parent_datacenter\nfrom ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper\n\n\nclass PyVmomiHelper(PyVmomi):\n def __init__(self, module):\n super(PyVmomiHelper, self).__init__(module)\n self.device_helper = PyVmomiDeviceHelper(self.module)\n self.desired_disks = self.params['disk'] # Match with vmware_guest parameter\n self.vm = None\n self.config_spec = 
vim.vm.ConfigSpec()\n self.config_spec.deviceChange = []\n\n def find_disk_by_key(self, disk_key, disk_unit_number):\n found_disk = None\n for device in self.vm.config.hardware.device:\n if isinstance(device, vim.vm.device.VirtualDisk) and device.key == disk_key:\n if device.unitNumber == disk_unit_number:\n found_disk = device\n break\n\n return found_disk\n\n @staticmethod\n def create_disk(ctl_key, disk):\n \"\"\"\n Create Virtual Device Spec for virtual disk\n Args:\n ctl_key: Unique SCSI Controller Key\n disk: The disk configurations dict\n\n Returns: Virtual Device Spec for virtual disk\n\n \"\"\"\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n disk_spec.device = vim.vm.device.VirtualDisk()\n disk_spec.device.key = -randint(20000, 24999)\n\n # Check if RDM first as changing backing later on will erase some settings like disk_mode\n if disk['disk_type'] == 'rdm':\n disk_spec.device.backing = vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo()\n disk_spec.device.backing.deviceName = disk['rdm_path']\n disk_spec.device.backing.compatibilityMode = disk['compatibility_mode']\n elif disk['disk_type'] == 'vpmemdisk':\n disk_spec.device.backing = vim.vm.device.VirtualDisk.LocalPMemBackingInfo()\n else:\n disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()\n\n if disk['disk_type'] != 'vpmemdisk':\n disk_spec.device.backing.diskMode = disk['disk_mode']\n disk_spec.device.backing.sharing = disk['sharing']\n\n if disk['disk_type'] == 'thin':\n disk_spec.device.backing.thinProvisioned = True\n elif disk['disk_type'] == 'eagerzeroedthick':\n disk_spec.device.backing.eagerlyScrub = True\n\n disk_spec.device.controllerKey = ctl_key\n disk_spec.device.unitNumber = disk['disk_unit_number']\n\n return disk_spec\n\n def reconfigure_vm(self, config_spec, device_type):\n \"\"\"\n Reconfigure virtual machine after modifying device spec\n Args:\n config_spec: Config Spec\n device_type: Type of device being modified\n\n Returns: Boolean status 'changed' and actual task result\n\n \"\"\"\n changed, results = (False, '')\n try:\n # Perform actual VM reconfiguration\n task = self.vm.ReconfigVM_Task(spec=config_spec)\n changed, results = wait_for_task(task)\n except vim.fault.InvalidDeviceSpec as invalid_device_spec:\n self.module.fail_json(msg=\"Failed to manage '%s' on given virtual machine due to invalid\"\n \" device spec : %s\" % (device_type, to_native(invalid_device_spec.msg)),\n details=\"Please check ESXi server logs for more details.\")\n except vim.fault.RestrictedVersion as e:\n self.module.fail_json(msg=\"Failed to reconfigure virtual machine due to\"\n \" product versioning restrictions: %s\" % to_native(e.msg))\n\n return changed, results\n\n def get_ioandshares_diskconfig(self, disk_spec, disk):\n io_disk_spec = vim.StorageResourceManager.IOAllocationInfo()\n if 'iolimit' in disk:\n io_disk_spec.limit = disk['iolimit']['limit']\n if 'shares' in disk['iolimit']:\n shares_spec = vim.SharesInfo()\n shares_spec.level = disk['iolimit']['shares']['level']\n if shares_spec.level == 'custom':\n shares_spec.shares = disk['iolimit']['shares']['level_value']\n io_disk_spec.shares = shares_spec\n disk_spec.device.storageIOAllocation = io_disk_spec\n if 'shares' in disk:\n shares_spec = vim.SharesInfo()\n shares_spec.level = disk['shares']['level']\n if shares_spec.level == 'custom':\n shares_spec.shares = disk['shares']['level_value']\n io_disk_spec.shares = shares_spec\n disk_spec.device.storageIOAllocation 
= io_disk_spec\n return disk_spec\n\n def get_sharing(self, disk, disk_type, disk_index):\n \"\"\"\n Get the sharing mode of the virtual disk\n Args:\n disk: Virtual disk data object\n disk_type: Disk type of the virtual disk\n disk_index: Disk unit number at which disk needs to be attached\n\n Returns:\n sharing_mode: The sharing mode of the virtual disk\n\n \"\"\"\n sharing = disk.get('sharing')\n if sharing and disk_type != 'eagerzeroedthick' and disk_type != 'rdm':\n self.module.fail_json(msg=\"Invalid 'sharing' mode specified for disk index [%s]. 'disk_mode'\"\n \" must be 'eagerzeroedthick' or 'rdm' when 'sharing'.\" % disk_index)\n if sharing:\n sharing_mode = 'sharingMultiWriter'\n else:\n sharing_mode = 'sharingNone'\n return sharing_mode\n\n def ensure_disks(self, vm_obj=None):\n \"\"\"\n Manage internal state of virtual machine disks\n Args:\n vm_obj: Managed object of virtual machine\n\n \"\"\"\n # Set vm object\n self.vm = vm_obj\n vm_files_datastore = self.vm.config.files.vmPathName.split(' ')[0].strip('[]')\n # Sanitize user input\n disk_data = self.sanitize_disk_inputs()\n ctl_changed = False\n disk_change_list = list()\n results = dict(changed=False, disk_data=None, disk_changes=dict())\n new_added_disk_ctl = list()\n sharesval = {'low': 500, 'normal': 1000, 'high': 2000}\n\n # Deal with controller\n for disk in disk_data:\n ctl_found = False\n # check if disk controller is in the new adding queue\n for new_ctl in new_added_disk_ctl:\n if new_ctl['controller_type'] == disk['controller_type'] and new_ctl['controller_number'] == disk['controller_number']:\n ctl_found = True\n break\n # check if disk controller already exists\n if not ctl_found:\n for device in self.vm.config.hardware.device:\n if isinstance(device, self.device_helper.disk_ctl_device_type[disk['controller_type']]):\n if device.busNumber == disk['controller_number']:\n ctl_found = True\n break\n # create disk controller when not found and disk state is present\n if not ctl_found and disk['state'] == 'present':\n # Create new controller\n if disk['controller_type'] in self.device_helper.scsi_device_type.keys():\n ctl_spec = self.device_helper.create_scsi_controller(disk['controller_type'], disk['controller_number'], disk['bus_sharing'])\n elif disk['controller_type'] == 'sata':\n ctl_spec = self.device_helper.create_sata_controller(disk['controller_number'])\n elif disk['controller_type'] == 'nvme':\n ctl_spec = self.device_helper.create_nvme_controller(disk['controller_number'])\n new_added_disk_ctl.append({'controller_type': disk['controller_type'], 'controller_number': disk['controller_number']})\n ctl_changed = True\n self.config_spec.deviceChange.append(ctl_spec)\n elif not ctl_found and disk['state'] == 'absent':\n self.module.fail_json(msg=\"Not found 'controller_type': '%s', 'controller_number': '%s', so can not\"\n \" remove this disk, please make sure 'controller_type' and\"\n \" 'controller_number' are correct.\" % (disk['controller_type'], disk['controller_number']))\n if ctl_changed:\n self.reconfigure_vm(self.config_spec, 'Disk Controller')\n self.config_spec = vim.vm.ConfigSpec()\n self.config_spec.deviceChange = []\n\n # Deal with Disks\n for disk in disk_data:\n disk_found = False\n update_io = False\n disk_change = False\n ctl_found = False\n for device in self.vm.config.hardware.device:\n if isinstance(device, self.device_helper.disk_ctl_device_type[disk['controller_type']]) and device.busNumber == disk['controller_number']:\n for disk_key in device.device:\n disk_device = 
self.find_disk_by_key(disk_key, disk['disk_unit_number'])\n if disk_device is not None:\n disk_found = True\n if disk['state'] == 'present':\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.device = disk_device\n # Deal with iolimit. Note that if iolimit is set, you HAVE TO both set limit and shares,\n # or ansible will break with \"'NoneType' object is not subscriptable\"\n if 'iolimit' in disk:\n if disk['iolimit']['limit'] != disk_spec.device.storageIOAllocation.limit:\n update_io = True\n\n if 'shares' in disk['iolimit']:\n # 'low', 'normal' and 'high' values in disk['iolimit']['shares']['level'] are converted to int values on vcenter side\n if (disk['iolimit']['shares']['level'] != 'custom'\n and sharesval.get(disk['iolimit']['shares']['level'], 0) != disk_spec.device.storageIOAllocation.shares.shares) or \\\n (disk['iolimit']['shares']['level'] == 'custom'\n and disk['iolimit']['shares']['level_value'] != disk_spec.device.storageIOAllocation.shares.shares):\n update_io = True\n\n if update_io:\n # set the operation to edit so that it knows to keep other settings\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)\n disk_change = True\n\n # If this is an RDM ignore disk size\n if disk['disk_type'] != 'rdm':\n if disk['size'] < disk_spec.device.capacityInKB:\n self.module.fail_json(msg=\"Given disk size at disk index [%s] is smaller than found\"\n \" (%d < %d). Reducing disks is not allowed.\"\n % (disk['disk_index'], disk['size'],\n disk_spec.device.capacityInKB))\n if disk['size'] != disk_spec.device.capacityInKB:\n # set the operation to edit so that it knows to keep other settings\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n if disk['disk_type'] != 'vpmemdisk':\n disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)\n disk_spec.device.capacityInKB = disk['size']\n disk_change = True\n\n if disk_change:\n self.config_spec.deviceChange.append(disk_spec)\n disk_change_list.append(disk_change)\n results['disk_changes'][disk['disk_index']] = \"Disk reconfigured.\"\n\n elif disk['state'] == 'absent':\n # Disk already exists, deleting\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n if disk['destroy'] is True:\n disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy\n disk_spec.device = disk_device\n self.config_spec.deviceChange.append(disk_spec)\n disk_change = True\n disk_change_list.append(disk_change)\n results['disk_changes'][disk['disk_index']] = \"Disk deleted.\"\n break\n\n if disk_found:\n break\n if not disk_found and disk['state'] == 'present':\n # Add new disk\n disk_spec = self.create_disk(device.key, disk)\n # get Storage DRS recommended datastore from the datastore cluster\n if disk['disk_type'] == 'rdm':\n # Since RDMs can be shared between two machines cluster_disk with rdm will\n # invoke a copy of the existing disk instead of trying to create a new one which causes\n # file lock issues in VSphere. 
This ensures we dont add a \"create\" operation.\n if disk['filename'] is not None and disk['cluster_disk'] is True:\n disk_spec.device.backing.fileName = disk['filename']\n else:\n disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create\n else:\n if disk['filename'] is None:\n if disk['datastore_cluster'] is not None:\n datastore_name = self.get_recommended_datastore(datastore_cluster_obj=disk['datastore_cluster'], disk_spec_obj=disk_spec)\n disk['datastore'] = find_obj(self.content, [vim.Datastore], datastore_name)\n\n disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create\n disk_spec.device.capacityInKB = disk['size']\n # Set backing filename when datastore is configured and not the same as VM datastore\n # If datastore is not configured or backing filename is not set, default is VM datastore\n if disk['datastore'] is not None and disk['datastore'].name != vm_files_datastore:\n disk_spec.device.backing.datastore = disk['datastore']\n disk_spec.device.backing.fileName = \"[%s] %s/%s_%s_%s_%s.vmdk\" % (disk['datastore'].name,\n self.vm.name,\n self.vm.name,\n device.key,\n str(disk['disk_unit_number']),\n str(randint(1, 10000)))\n elif disk['filename'] is not None:\n disk_spec.device.backing.fileName = disk['filename']\n disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)\n\n self.config_spec.deviceChange.append(disk_spec)\n disk_change = True\n disk_change_list.append(disk_change)\n results['disk_changes'][disk['disk_index']] = \"Disk created.\"\n break\n if disk_change:\n # Adding multiple disks in a single attempt raises weird errors\n # So adding single disk at a time.\n self.reconfigure_vm(self.config_spec, 'disks')\n self.config_spec = vim.vm.ConfigSpec()\n self.config_spec.deviceChange = []\n if any(disk_change_list):\n results['changed'] = True\n results['disk_data'] = self.device_helper.gather_disk_info(self.vm)\n self.module.exit_json(**results)\n\n def sanitize_disk_inputs(self):\n \"\"\"\n Check correctness of disk input provided by user\n\n Returns: A list of dictionary containing disk information\n\n \"\"\"\n disks_data = list()\n if not self.desired_disks:\n self.module.exit_json(changed=False, msg=\"No disks provided for virtual machine '%s' for management.\"\n % self.vm.name)\n\n for disk_index, disk in enumerate(self.desired_disks):\n # Initialize default value for disk\n current_disk = dict(disk_index=disk_index,\n state='present',\n destroy=True,\n filename=None,\n datastore_cluster=None,\n datastore=None,\n autoselect_datastore=True,\n disk_unit_number=0,\n controller_number=0,\n disk_mode='persistent',\n disk_type='thick',\n sharing=False,\n bus_sharing='noSharing',\n cluster_disk=False)\n # Type of Disk\n if disk['type'] is not None:\n current_disk['disk_type'] = disk['type']\n if current_disk['disk_type'] == 'vpmemdisk':\n if self.vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:\n self.module.fail_json(msg=\"Please make sure VM is in powered off state before doing vPMem disk\"\n \" reconfiguration.\")\n disk['datastore'] = None\n disk['autoselect_datastore'] = None\n disk['filename'] = None\n disk['disk_mode'] = None\n\n # Check state\n if disk['state'] is not None:\n current_disk['state'] = disk['state']\n\n # Check controller type\n if disk['scsi_type'] is not None and disk['controller_type'] is None:\n current_disk['controller_type'] = disk['scsi_type']\n elif disk['scsi_type'] is None and disk['controller_type'] is None:\n current_disk['controller_type'] = 'paravirtual'\n elif 
disk['controller_type'] is not None and disk['scsi_type'] is None:\n current_disk['controller_type'] = disk['controller_type']\n else:\n self.module.fail_json(msg=\"Please specify either 'scsi_type' or 'controller_type' for disk index [%s].\"\n % disk_index)\n if current_disk['controller_type'] == 'ide':\n if self.vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:\n self.module.fail_json(msg=\"Please make sure VM is in powered off state before doing IDE disk\"\n \" reconfiguration.\")\n\n # Check controller bus number\n if disk['scsi_controller'] is not None and disk['controller_number'] is None and disk['controller_type'] is None:\n temp_disk_controller = disk['scsi_controller']\n elif disk['controller_number'] is not None and disk['scsi_controller'] is None and disk['scsi_type'] is None:\n temp_disk_controller = disk['controller_number']\n else:\n self.module.fail_json(msg=\"Please specify 'scsi_controller' with 'scsi_type', or 'controller_number'\"\n \" with 'controller_type' under disk parameter for disk index [%s], which is\"\n \" required while creating or configuring disk.\" % disk_index)\n try:\n disk_controller = int(temp_disk_controller)\n except ValueError:\n self.module.fail_json(msg=\"Invalid controller bus number '%s' specified\"\n \" for disk index [%s]\" % (temp_disk_controller, disk_index))\n if current_disk['controller_type'] == 'ide' and disk_controller not in [0, 1]:\n self.module.fail_json(msg=\"Invalid controller bus number '%s' specified\"\n \" for disk index [%s], valid value is 0 or 1\" % (disk_controller, disk_index))\n\n current_disk['controller_number'] = disk_controller\n\n try:\n temp_disk_unit_number = int(disk['unit_number'])\n except ValueError:\n self.module.fail_json(msg=\"Invalid Disk unit number ID '%s' specified at index [%s].\"\n % (disk['unit_number'], disk_index))\n if current_disk['controller_type'] in self.device_helper.scsi_device_type.keys():\n # the Paravirtual SCSI Controller Supports up to 64 disks in vSphere 6.7. 
Using hardware\n # version 14 or higher from the vm config should catch this appropriately.\n hw_version = int(self.vm.config.version.split('-')[1])\n if current_disk['controller_type'] == 'paravirtual' and hw_version >= 14:\n if temp_disk_unit_number not in range(0, 64):\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for disk [%s] at index [%s],\"\n \" please specify value between 0 to 64 only (excluding 7).\"\n % (temp_disk_unit_number, disk_index))\n if temp_disk_unit_number == 7:\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for disk at index [%s], please\"\n \" specify value other than 7 as it is reserved for SCSI Controller.\"\n % disk_index)\n\n else:\n if temp_disk_unit_number not in range(0, 16):\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for disk [%s] at index [%s],\"\n \" please specify value between 0 to 15 only (excluding 7).\"\n % (temp_disk_unit_number, disk_index))\n if temp_disk_unit_number == 7:\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for disk at index [%s], please\"\n \" specify value other than 7 as it is reserved for SCSI Controller.\"\n % disk_index)\n elif current_disk['controller_type'] == 'sata' and temp_disk_unit_number not in range(0, 30):\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for SATA disk [%s] at index [%s],\"\n \" please specify value between 0 to 29\" % (temp_disk_unit_number, disk_index))\n elif current_disk['controller_type'] == 'nvme' and temp_disk_unit_number not in range(0, 15):\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for NVMe disk [%s] at index [%s],\"\n \" please specify value between 0 to 14\" % (temp_disk_unit_number, disk_index))\n elif current_disk['controller_type'] == 'ide' and temp_disk_unit_number not in [0, 1]:\n self.module.fail_json(msg=\"Invalid Disk unit number ID specified for IDE disk [%s] at index [%s],\"\n \" please specify value 0 or 1\" % (temp_disk_unit_number, disk_index))\n current_disk['disk_unit_number'] = temp_disk_unit_number\n\n # By default destroy file from datastore if 'destroy' parameter is not provided\n if current_disk['state'] == 'absent':\n current_disk['destroy'] = disk.get('destroy', True)\n elif current_disk['state'] == 'present':\n # Select datastore or datastore cluster\n if disk['datastore'] is not None:\n if disk['autoselect_datastore'] is not None:\n self.module.fail_json(msg=\"Please specify either 'datastore' or 'autoselect_datastore' for\"\n \" disk index [%s]\" % disk_index)\n # Check if given value is datastore or datastore cluster\n datastore_name = disk['datastore']\n datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name)\n datastore = find_obj(self.content, [vim.Datastore], datastore_name)\n if datastore is None and datastore_cluster is None:\n self.module.fail_json(msg=\"Failed to find datastore or datastore cluster named '%s' \"\n \"in given configuration.\" % disk['datastore'])\n if datastore_cluster:\n # If user specified datastore cluster, keep track of that for determining datastore later\n current_disk['datastore_cluster'] = datastore_cluster\n elif datastore:\n ds_datacenter = get_parent_datacenter(datastore)\n if ds_datacenter.name != self.module.params['datacenter']:\n self.module.fail_json(msg=\"Get datastore '%s' in datacenter '%s', not the configured\"\n \" datacenter '%s'\" % (datastore.name, ds_datacenter.name,\n self.module.params['datacenter']))\n current_disk['datastore'] = datastore\n 
current_disk['autoselect_datastore'] = False\n elif disk['autoselect_datastore'] is not None:\n # Find datastore which fits requirement\n datastores = get_all_objs(self.content, [vim.Datastore])\n if not datastores:\n self.module.fail_json(msg=\"Failed to gather information about available datastores in given\"\n \" datacenter '%s'.\" % self.module.params['datacenter'])\n datastore = None\n datastore_freespace = 0\n for ds in datastores:\n if ds.summary.freeSpace > datastore_freespace:\n # If datastore field is provided, filter destination datastores\n datastore = ds\n datastore_freespace = ds.summary.freeSpace\n current_disk['datastore'] = datastore\n else:\n if current_disk['disk_type'] == 'vpmemdisk':\n current_disk['datastore'] = None\n current_disk['autoselect_datastore'] = False\n\n if disk['filename'] is not None:\n current_disk['filename'] = disk['filename']\n\n if [x for x in disk.keys() if ((x.startswith('size_') or x == 'size') and disk[x] is not None)]:\n # size, size_tb, size_gb, size_mb, size_kb\n disk_size_parse_failed = False\n if disk['size'] is not None:\n size_regex = re.compile(r'(\\d+(?:\\.\\d+)?)([tgmkTGMK][bB])')\n disk_size_m = size_regex.match(disk['size'])\n if disk_size_m:\n expected = disk_size_m.group(1)\n unit = disk_size_m.group(2)\n else:\n disk_size_parse_failed = True\n try:\n if re.match(r'\\d+\\.\\d+', expected):\n # We found float value in string, let's typecast it\n expected = float(expected)\n else:\n # We found int value in string, let's typecast it\n expected = int(expected)\n except (TypeError, ValueError, NameError):\n disk_size_parse_failed = True\n else:\n # Even multiple size_ parameter provided by user,\n # consider first value only\n param = [x for x in disk.keys() if (x.startswith('size_') and disk[x] is not None)][0]\n unit = param.split('_')[-1]\n disk_size = disk[param]\n if isinstance(disk_size, (float, int)):\n disk_size = str(disk_size)\n\n try:\n if re.match(r'\\d+\\.\\d+', disk_size):\n # We found float value in string, let's typecast it\n expected = float(disk_size)\n else:\n # We found int value in string, let's typecast it\n expected = int(disk_size)\n except (TypeError, ValueError, NameError):\n disk_size_parse_failed = True\n\n if disk_size_parse_failed:\n # Common failure\n self.module.fail_json(msg=\"Failed to parse disk size for disk index [%s],\"\n \" please review value provided\"\n \" using documentation.\" % disk_index)\n\n disk_units = dict(tb=3, gb=2, mb=1, kb=0)\n unit = unit.lower()\n if unit in disk_units:\n current_disk['size'] = expected * (1024 ** disk_units[unit])\n else:\n self.module.fail_json(msg=\"%s is not a supported unit for disk size for disk index [%s].\"\n \" Supported units are ['%s'].\" % (unit, disk_index, \"', '\".join(disk_units.keys())))\n elif current_disk['filename'] is None and disk['type'] != 'rdm':\n # No size found but disk, fail. Excepting RDMs because the cluster_disk will need a filename.\n self.module.fail_json(msg=\"No size, size_kb, size_mb, size_gb or size_tb\"\n \" attribute found into disk index [%s] configuration.\" % disk_index)\n\n # Mode of Disk\n if disk['disk_mode'] is not None:\n current_disk['disk_mode'] = disk['disk_mode']\n\n if current_disk['disk_type'] != 'vpmemdisk':\n # Sharing mode of disk\n current_disk['sharing'] = self.get_sharing(disk, current_disk['disk_type'], disk_index)\n\n if disk['shares'] is not None:\n current_disk['shares'] = disk['shares']\n if disk['iolimit'] is not None:\n current_disk['iolimit'] = disk['iolimit']\n\n # Deal with RDM disk needs. 
RDMS require some different values compared to Virtual Disks\n if disk['type'] == 'rdm':\n compatibility_mode = disk.get('compatibility_mode', 'physicalMode')\n if compatibility_mode not in ['physicalMode', 'virtualMode']:\n self.module.fail_json(msg=\"Invalid 'compatibility_mode' specified for disk index [%s]. Please specify\"\n \"'compatibility_mode' value from ['physicalMode', 'virtualMode'].\" % disk_index)\n current_disk['compatibility_mode'] = compatibility_mode\n\n # RDMs need a path\n if 'rdm_path' not in disk and 'filename' not in disk:\n self.module.fail_json(msg=\"rdm_path and/or 'filename' needs must be specified when using disk type 'rdm'\"\n \"for disk index [%s]\" % disk_index)\n else:\n current_disk['rdm_path'] = disk.get('rdm_path')\n\n if disk['filename'] and disk['rdm_path'] is None and disk['cluster_disk'] is False:\n self.module.fail_json(msg=\" 'filename' requires setting 'cluster_disk' to True when using disk type 'rdm' without a\"\n \"'rdm_path' for disk index [%s]\" % disk_index)\n else:\n current_disk['cluster_disk'] = disk.get('cluster_disk')\n\n # Enable Physical or virtual SCSI Bus Sharing\n if disk['bus_sharing']:\n bus_sharing = disk.get('bus_sharing', 'noSharing')\n if bus_sharing not in ['noSharing', 'physicalSharing', 'virtualSharing']:\n self.module.fail_json(msg=\"Invalid SCSI 'bus_sharing' specied for disk index [%s]. Please \"\n \"specify 'bus_sharing' value from \"\n \"['noSharing', 'physicalSharing', 'virtualSharing'].\" % disk_index)\n current_disk['bus_sharing'] = bus_sharing\n\n disks_data.append(current_disk)\n\n return disks_data\n\n def get_recommended_datastore(self, datastore_cluster_obj, disk_spec_obj):\n \"\"\"\n Return Storage DRS recommended datastore from datastore cluster\n Args:\n datastore_cluster_obj: datastore cluster managed object\n\n Returns: Name of recommended datastore from the given datastore cluster,\n Returns None if no datastore recommendation found.\n\n \"\"\"\n # Check if Datastore Cluster provided by user is SDRS ready\n sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled\n if sdrs_status:\n # We can get storage recommendation only if SDRS is enabled on given datastorage cluster\n disk_loc = vim.storageDrs.PodSelectionSpec.DiskLocator()\n pod_config = vim.storageDrs.PodSelectionSpec.VmPodConfig()\n pod_config.storagePod = datastore_cluster_obj\n pod_config.disk = [disk_loc]\n pod_sel_spec = vim.storageDrs.PodSelectionSpec()\n pod_sel_spec.initialVmConfig = [pod_config]\n storage_spec = vim.storageDrs.StoragePlacementSpec()\n storage_spec.configSpec = vim.vm.ConfigSpec()\n storage_spec.configSpec.deviceChange.append(disk_spec_obj)\n storage_spec.resourcePool = self.vm.resourcePool\n storage_spec.podSelectionSpec = pod_sel_spec\n storage_spec.vm = self.vm\n storage_spec.type = 'reconfigure'\n\n try:\n rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)\n rec_action = rec.recommendations[0].action[0]\n return rec_action.destination.name\n except Exception:\n # There is some error so we fall back to general workflow\n pass\n datastore = None\n datastore_freespace = 0\n for ds in datastore_cluster_obj.childEntity:\n if ds.summary.maintenanceMode == \"inMaintenance\":\n continue\n if ds.summary.freeSpace > datastore_freespace:\n # If datastore field is provided, filter destination datastores\n datastore = ds\n datastore_freespace = ds.summary.freeSpace\n if datastore:\n return datastore.name\n return None\n\n\ndef main():\n argument_spec = 
vmware_argument_spec()\n argument_spec.update(\n name=dict(type='str'),\n uuid=dict(type='str'),\n moid=dict(type='str'),\n folder=dict(type='str'),\n datacenter=dict(type='str', required=True),\n use_instance_uuid=dict(type='bool', default=False),\n disk=dict(\n type='list',\n default=[],\n elements='dict',\n options=dict(\n size=dict(type='str'),\n size_kb=dict(type='int'),\n size_mb=dict(type='int'),\n size_gb=dict(type='int'),\n size_tb=dict(type='int'),\n type=dict(type='str', choices=['thin', 'eagerzeroedthick', 'thick', 'rdm', 'vpmemdisk']),\n disk_mode=dict(type='str', choices=['persistent', 'independent_persistent', 'independent_nonpersistent']),\n compatibility_mode=dict(type='str', choices=['physicalMode', 'virtualMode']),\n rdm_path=dict(type='str'),\n sharing=dict(type='bool', default=False),\n datastore=dict(type='str'),\n autoselect_datastore=dict(type='bool'),\n scsi_controller=dict(type='int', choices=[0, 1, 2, 3]),\n unit_number=dict(type='int', required=True),\n scsi_type=dict(type='str', choices=['buslogic', 'lsilogic', 'paravirtual', 'lsilogicsas']),\n destroy=dict(type='bool', default=True),\n filename=dict(type='str'),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n controller_type=dict(type='str', choices=['buslogic', 'lsilogic', 'paravirtual', 'lsilogicsas', 'sata', 'nvme', 'ide']),\n controller_number=dict(type='int', choices=[0, 1, 2, 3]),\n bus_sharing=dict(type='str', choices=['noSharing', 'physicalSharing', 'virtualSharing'], default='noSharing'),\n cluster_disk=dict(type='bool', default=False),\n iolimit=dict(\n type='dict',\n options=dict(\n limit=dict(type='int'),\n shares=dict(\n type='dict',\n options=dict(\n level=dict(type='str', choices=['low', 'high', 'normal', 'custom']),\n level_value=dict(type='int'),\n ),\n ),\n )),\n shares=dict(\n type='dict',\n options=dict(\n level=dict(type='str', choices=['low', 'high', 'normal', 'custom']),\n level_value=dict(type='int'),\n ),\n ),\n ),\n ),\n )\n module = AnsibleModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'uuid', 'moid']],\n )\n\n if module.params['folder']:\n # FindByInventoryPath() does not require an absolute path\n # so we should leave the input folder path unmodified\n module.params['folder'] = module.params['folder'].rstrip('/')\n\n pyv = PyVmomiHelper(module)\n # Check if the VM exists before continuing\n vm = pyv.get_vm()\n\n if not vm:\n # We unable to find the virtual machine user specified\n # Bail out\n vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))\n module.fail_json(msg=\"Unable to manage disks for non-existing\"\n \" virtual machine '%s'.\" % vm_id)\n\n # VM exists\n try:\n pyv.ensure_disks(vm_obj=vm)\n except Exception as exc:\n module.fail_json(msg=\"Failed to manage disks for virtual machine\"\n \" '%s' with exception : %s\" % (vm.name,\n to_native(exc)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ansible-collections/community.vmware","sub_path":"plugins/modules/vmware_guest_disk.py","file_name":"vmware_guest_disk.py","file_ext":"py","file_size_in_byte":59867,"program_lang":"python","lang":"en","doc_type":"code","stars":299,"dataset":"github-code","pt":"2"} +{"seq_id":"21024497090","text":"import requests, json, re, sys\nfrom time import sleep\nfrom config import FMA_API\n\ndef uprint(*objects, sep=' ', end='\\n', file=sys.stdout):\n enc = file.encoding\n if enc == 'UTF-8':\n print(*objects, sep=sep, end=end, file=file)\n else:\n f = lambda obj: str(obj).encode(enc, 
errors='backslashreplace').decode(enc)\n print(*map(f, objects), sep=sep, end=end, file=file)\n\nfor page in range(1, 832):\n\n payload = { 'api_key' : str(FMA_API), 'page' : page }\n try:\n r = requests.get(\"https://freemusicarchive.org/api/get/artists.json?\", params = payload)\n sleep(1)\n except:\n continue\n data = json.loads(r.text)\n #total: 16610, total_pages: 1108 (831?), limit: 20\n\n\n for l in data['dataset']:\n try:\n artist_id = int(l['artist_id'])\n artist_name = str(l['artist_name'])\n if artist_id not in ids:\n ids[artist_id] = []\n ids[artist_id].append(artist_name)\n except:\n print (data['errors'])\n\n uprint (artist_name)\nwith open('fma_ids.json', 'w') as outfile:\n json.dump(ids, outfile)\n","repo_name":"erutigl3/Land-of-the-Free-Music-Archive","sub_path":"req_FMA_artistid.py","file_name":"req_FMA_artistid.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"21722920529","text":"'''\nThis file plays the blackjack game. The user inputs what type of game play they want to see.\n'''\nfrom players import Baseline, Oracle, RLPlayer\n\nplayers = ['Baseline', 'Oracle', 'RL']\nstrategies = ['Hi-Lo', 'Omega II', 'Wong Halves']\nalgorithms = ['Q-Learning']\n\ndef getAlgorithm():\n while True:\n algorithm = raw_input('What algorithm should RL use? (Q-Learning) ')\n if algorithm in algorithms: return algorithm\n\ndef getStrategy():\n while True:\n strategy = raw_input('What counting strategy should RL use? (Hi-Lo, Omega II, Wong Halves) ')\n if strategy in strategies: return strategy\n\ndef main():\n player = RLPlayer('Hi-Lo', 'Q-Learning')\n player.train(1000000)\n\n '''while True:\n playerName = raw_input('What type of player do you want? (Baseline, Oracle, RL) ')\n if playerName in players:\n player = None\n if playerName == 'Baseline':\n player = Baseline(playerName)\n player.playGame()\n elif playerName == 'Oracle':\n player = Oracle(playerName)\n player.playGame()\n else:\n player = RLPlayer(getStrategy(), getAlgorithm())\n player.train(1000000)\n break'''\n\nif __name__ == '__main__': main()\n","repo_name":"virajmehta/rl-blackjack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1394878275","text":"print('-+-'*8)\nprint(' GERADOR DE PA 2.0 ')\nprint('-+-'*8)\nn = int(input('Primeiro termo da PA: '))\nr = int(input('A sua razão: '))\ny = 10\ncont = 0\nwhile y != 0:\n print('- {} '.format(n), end='')\n n = n + r\n y -= 1\n cont += 1\nt = 1\nwhile t != 0:\n t = int(input('\\nQuer mostrar mais quantos termos? 
'))\n if t != 0:\n x = t\n while x != 0:\n print(' - {}'.format(n), end='')\n n = n + r\n x -= 1\n cont += 1\nprint('Progressão finalizada com {} termos'.format(cont))","repo_name":"Werberty/Curso-em-Video-Python3","sub_path":"Modulo 2/ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26559532550","text":"import sqlite3\nfrom sqlite3 import Error\n\nclass AccessApproaches:\n \n __database = \"Database/data.db\"\n\n def __init__(self):\n pass\n\n def __createConnection(self, dbFile):\n \n conn = None\n try:\n conn = sqlite3.connect(dbFile)\n return conn\n\n except Error as e:\n print(e)\n \n return conn\n\n def __execute(self,conn,command):\n try:\n c = conn.cursor()\n c.execute(command)\n conn.commit()\n\n return \"Added Successfully!\"\n\n except Error as e:\n return e\n\n def addApproach(self,approach):\n # create a connection\n conn = self.__createConnection(self.__database)\n\n if conn is not None:\n # make the insert command\n insert = '''INSERT INTO Approaches \n VALUES('{}','{}','{}','{}');\n '''.format(approach.getName(),approach.getArchDetails(),approach.getPaperLink(),approach.getOwnerID())\n \n dbResponse = self.__execute(conn,insert) # execute it\n \n return dbResponse\n\n else:\n print(\"Error! Cannot create the database connection.\")\n return \"Error! Cannot create the database connection.\"\n","repo_name":"dillonkh/metric","sub_path":"DataAccess/AccessApproaches.py","file_name":"AccessApproaches.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"37695365804","text":"#!/usr/bin/env python\n#########################################################################################\n#\t\t\t\t\t\t\t\t\t\t\t#\n# Name\t :\tnb_samples.py\t\t\t\t\t\t\t\t#\n# Version : 0.4\t\t\t\t\t\t\t\t#\n# Project : targeted Metabolomics\t\t\t\t\t\t#\n# Description : Script to select stool samples based on weight, sample id, time or combination of them\t\t#\n# Author : Brigida Rusconi\t\t\t\t\t\t\t\t#\n# Date : August 17th, 2016\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t#\n#########################################################################################\n\n\nimport argparse, os, sys, csv,pandas,pdb, numpy\nfrom pandas import *\nfrom numpy import *\nimport math\nfrom math import floor\nimport re\nfrom re import search\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-o', '--output', help=\"table of nb of samples above cutoff\")\nparser.add_argument('-i', '--table', help=\"stool master list as txt file\")\nparser.add_argument('-w', '--weight', help=\"min sample weight\",const=2400,nargs='?')\nparser.add_argument('-b', '--DOB', help=\"list with DOB. Required: header: Infant date of birth, file format: txt\")\nparser.add_argument('-s', '--sample', help=\"list with samples. 
Required: header: Sample, file format: txt \")\nparser.add_argument('-d', '--days', help=\"list with days.Required: header: Days, file format: txt\")\nparser.add_argument('-t', '--DOL', help=\"make table with DOL of stool sample list (True/False)\",default=False)\nparser.add_argument('-m', '--matched', help=\"list with sample and days to look up\")\nparser.add_argument('-a','--all',help=\"output with all matching time points and samples\")\n\nargs = parser.parse_args()\noutput_file = args.output\ninput_file = args.table\nDOB = args.DOB\ndol_table=args.DOL\n#pdb.set_trace()\n#-------------------read tables and parse them---------------------------------\n\n\n\ndata=read_csv(input_file,sep='\\t', dtype=object)\ndob=read_csv(DOB, dtype=object)\ndata.rename(columns={'Time Collected':'time_collected'},inplace=True)\ndob.rename(columns={'Infant date of birth':'DOB'},inplace=True)\ndob.DOB=to_datetime(dob.DOB)\ndata.dropna(subset=['time_collected'], inplace=True)\ndata[data.time_collected != 'D/T Unknown']\ndata.time_collected=to_datetime(data.time_collected, errors='coerce')\ndata.Weight=to_numeric(data.Weight,errors='coerce')\ndata.dropna(subset=['Weight'], inplace=True)\n\n#sum weight of duplicate values\n#df1=data.groupby(['Study','time_collected'])['Weight'].sum().reset_index()\n#http://pandas.pydata.org/pandas-docs/stable/groupby.html\n\ndata['total_weight']=data.groupby(['Study','time_collected'])['Weight'].transform('sum')\n\n#pdb.set_trace()\n#reorder dataframe without duplicates of study and time collected\n\n#replace with sum values of weight\n\n\n\n#----------------optional------------- weight of sample--------------------------------\nif args.weight:\n w=args.weight\n df5=DataFrame()\n \n#get samples with enough weight\n# & has higher precedence than == BE CAREFUL http://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing\n# with unique each item is only iterated once\n#have to remove any characters so that it will not fail on the float conversion\n\n uniq=list(data.Patient.unique())\n for i,item in enumerate(uniq):\n if bool(search(r'\\d',item)) is not True:\n uniq.pop(i)\n for item in uniq:\n if float(item)>=float(100.01):\n df5=df5.append(data[(data.Patient==item) & (data.total_weight >=float(w))])\n else:\n df5=df5.append(data[(data.Patient==item) & (data.total_weight >=float(250))])\n\nelse:\n df5=data[data.Weight>0]\n\n\n#pdb.set_trace()\n#----------------optional-------------selected samples--------------------------------\n\nif args.sample:\n samp=args.sample\n samp1=read_csv(samp,sep='\\t',dtype=object)\n samp1.sort('Sample',inplace=True)\n samp1.set_index('Sample',inplace=True)\n df5.set_index('Patient',inplace=True)\n df5=df5[df5.index.isin(samp1.index)]\n df5.reset_index(inplace=True)\n #group samples by patient\n nb_sampl=df5.groupby('Patient').size()\nelse:\n nb_sampl=DataFrame(df5.groupby('Patient').size(),columns=['count'])\n\n#--------------------count of samples-----------------------------\nwith open(output_file ,'w') as output:\n nb_sampl.to_csv(output, sep='\\t')\n\n#----------------------------------------------------------------------------------------\n#-----------------DOL information if wanted--------------------------------------------------------\nif dol_table=='True':\n output2=args.all\n \n #make list with DOL value\n df5.set_index(['Patient'],inplace=True)\n dob.set_index(['Study ID'],inplace=True)\n\n df6=concat([df5,dob['DOB']],axis=1,join_axes=[df5.index])\n df6['DOL']=df6['time_collected']-df6['DOB']\n\n #convert to float days\n 
new=list()\n for item in df6['DOL']:\n new.append(float(\"{0:.2f}\".format(item.total_seconds()/86400)))\n\n df6.insert(df6.columns.size, \"DOL_dec\",new)\n\n\n #--------------------optional---------select time point-------------------------------------------\n if args.days and (not args.sample) and (not args.matched):\n day=args.days\n dy1=read_csv(day,sep='\\t',dtype=object)\n dy1=list(to_numeric(dy1.Days))\n rd=list()\n for item in df6['DOL_dec']:\n rd.append(floor(item))\n df6.insert(df6.columns.size,'rounded',rd)\n df6.reset_index(inplace=True)\n pos=list()\n for i in df6.index:\n if float(df6['rounded'][i]) in dy1:\n pos.append(i)\n df8=df6[df6.index.isin(pos)]\n with open(output2 ,'w') as output:\n df8.to_csv(output, sep='\\t')\n\n #--------------------optional---------select time point for select samples-------------------------------------------\n elif args.days and args.sample and (not args.matched):\n day=args.days\n dy1=read_csv(day,sep='\\t',dtype=object)\n dy1=list(to_numeric(dy1.Days))\n samp=args.sample\n samp1=read_csv(samp,sep='\\t',dtype=object)\n samp1.sort('Sample',inplace=True)\n samp1.set_index('Sample',inplace=True)\n rd=list()\n for item in df6['DOL_dec']:\n rd.append(floor(item))\n df6.insert(df6.columns.size,'rounded',rd)\n df6.reset_index(inplace=True)\n pos=list()\n for i in df6.index:\n if float(df6['rounded'][i]) in dy1:\n pos.append(i)\n df8=df6[df6.index.isin(pos)]\n df8.set_index('Patient',inplace=True)\n df8=df8[df8.index.isin(samp1.index)]\n df8.reset_index(inplace=True)\n with open(output2 ,'w') as output:\n df8.to_csv(output, sep='\\t')\n#--------------------optional---------select matched controls/cases-------------------------------------------\n elif args.matched and (not args.days) and (not args.sample):\n match=args.matched\n matched1=read_csv(match,sep='\\t',dtype=object)\n matched1.Days=to_numeric(matched1.Days)\n \n cases=matched1[matched1.Phenotype=='Cases']\n #find closest day to onset\n cases.reset_index(inplace=True)\n rd=list()\n for item in df6['DOL_dec']:\n rd.append(floor(item))\n df6.insert(df6.columns.size,'rounded',rd)\n df6.reset_index(inplace=True)\n closest=list()\n\n for i,item in enumerate(cases.Sample.unique()):\n l=list(df6[(df6.Patient==item) & (df6.rounded0:\n closest.append(min(l, key=lambda x:cases.Days[i]-x))\n else:\n closest.append(cases.Days[i])\n cases.insert(cases.columns.size,'closest',closest)\n cases.set_index(['Sample','closest'],inplace=True)\n df6.set_index(['Patient','rounded'],inplace=True)\n sel_cases=df6[df6.index.isin(cases.index)]\n sel_cases.reset_index(inplace=True)\n #replace closest sample time point for controls to match\n controls=matched1[matched1.Phenotype=='Controls']\n controls.reset_index(inplace=True)\n\n cases.reset_index(inplace=True)\n for i,item in enumerate(cases.Sample):\n for x,item1 in enumerate(controls.Matching_Case):\n if item==item1:\n controls.Days[x]=cases.closest[i]\n\n #make a range from the days that are selected to have more control hits, maybe modify so that interval can be changed and not fixed currently only 3 days before onset\n re=list()\n id=list()\n for i,item in enumerate(controls.Days):\n re.append(range((int(controls.Days[i])-3),(int(controls.Days[i]+1))))\n id.append(repeat(controls.Sample[i],len(re[i])).tolist())\n flat_re=[n for item in re for n in item]\n flat_id=[n for item in id for n in item]\n ri=zip(flat_id,flat_re)\n tes=concatenate([z for z in ri])\n tes=tes.reshape(-1,2)\n matched2=DataFrame(tes,columns=['Sample','Days'])\n 
matched2.Days=matched2.Days.astype(float)\n\n df6.reset_index(inplace=True)\n df7=DataFrame()\n for item in matched2.Sample.unique():\n l=df6[df6.Patient==item]\n g=list(matched2[matched2.Sample==item]['Days'])\n df7=df7.append(l[l['rounded'].isin(g)])\n df8=concat([sel_cases,df7],axis=0)\n # append onset day if you use index.value you get the index of the match\n onset=list()\n for item in df8.Patient:\n onset.append(matched1.Days[matched1[matched1.Sample==item].index.values].values[0])\n#pdb.set_trace()\n df8.reset_index(inplace=True)\n df8.insert(df8.columns.size,'onset',onset)\n #pdb.set_trace()\n df8.sort(columns=['NEC','Patient','time_collected'],inplace=True)\n df8.drop('index',inplace=True,axis=1)\n missing=list()\n for item in matched1.Sample:\n if item in df8.Patient.values:\n pass\n else:\n missing.append(item)\n#pdb.set_trace()\n with open(output2 ,'w') as output:\n df8.to_csv(output, sep='\\t',index=False)\n\n with open(\"missing.txt\" ,'w') as output:\n for item in missing:\n output.write('%s\\n' % item)\n else:\n samp=args.sample\n samp1=read_csv(samp,sep='\\t',dtype=object)\n samp1.sort('Sample',inplace=True)\n samp1.set_index('Sample',inplace=True)\n df6=df5[df5.index.isin(samp1.index)]\n df7=concat([df5,dob['DOB']],axis=1,join_axes=[df5.index])\n df7.reset_index(inplace=True)\n df7['DOL']=df7['time_collected']-df7['DOB']\n new=list()\n for item in df7['DOL']:\n new.append(float(\"{0:.2f}\".format(item.total_seconds()/86400)))\n \n df7.insert(df7.columns.size, \"DOL_dec\",new)\n df8=df7.sort(columns=['NEC','Patient','time_collected'])\n#pdb.set_trace()\n with open(output2 ,'w') as output:\n df8.to_csv(output, sep='\\t',index=False)","repo_name":"brigidar/sample_selection","sub_path":"Nb_samples.py","file_name":"Nb_samples.py","file_ext":"py","file_size_in_byte":10900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8861893335","text":"#\n# Created by djz on 2021/01/21.\n#\n\"\"\"EET fairseq gpt2 model. 
\"\"\"\nimport os\nimport math\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom fairseq.models.transformer import TransformerDecoder\nfrom fairseq.models.transformer import TransformerEncoder\nfrom fairseq.modules import AdaptiveSoftmax\nfrom fairseq import options\nfrom fairseq import utils\nfrom fairseq.data.dictionary import Dictionary\n\nfrom ..pipelines.generation import GenerationMixin_EET\nfrom transformers.file_utils import ModelOutput\nfrom transformers import GPT2Config\nfrom EET import MetaDesc as meta_desc\nfrom EET import LayerNorm as eet_layernorm\nfrom EET import Embedding as eet_embedding\nfrom EET import FeedForwardNetwork as eet_ffn\nfrom EET import MaskedMultiHeadAttention as eet_attention\nfrom EET import CrossMultiHeadAttention as eet_cross_attention\nfrom EET import MultiHeadAttention as eet_encoder_attention\n\nFROM_TORCH_PARAM_LEN = 17\nFROM_BUFFER_PARAM_LEN = 9\nDEFAULT_MAX_TARGER_POSITIONS = 1024\n\n__all__ = [\n 'EETTransformerLayerNorm', 'EETTransformerEmbedding', 'EETTransformerFeedforward', 'EETTransformerAttention',\n 'EETTransformerDecoderLayer', 'EETTransformerDecoder'\n]\n\n\nclass CausalLMOutputWithCrossAttentions(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass EETTransformerLayerNorm():\n def __init__(self, args, meta_des, layernorm_weights, layernorm_bias, data_type=torch.float32):\n self.layernorm_weights = layernorm_weights.cuda().type(data_type)\n self.layernorm_bias = layernorm_bias.cuda().type(data_type)\n self.layernorm = eet_layernorm(meta_des, self.layernorm_weights, self.layernorm_bias)\n\n def __call__(self,\n input_ids):\n # positional_encode + embedding_lookup\n return self.layernorm.layer_norm(input_ids)\n\n @staticmethod\n def from_torch(args, meta_des, layernorm_weights, layernorm_bias, data_type=torch.float32):\n layernorm = EETTransformerLayerNorm(args, meta_des, layernorm_weights, layernorm_bias, data_type=data_type)\n return layernorm\n\n\nclass EETTransformerEmbedding():\n def __init__(self, args, meta_des, embedding_weights, data_type):\n self.embedding_weights = embedding_weights.cuda().type(data_type)\n self.padding_idx = 1\n self.weight = self.embedding_weights\n self.embed_scale = args.no_scale_embedding\n self.embedding = eet_embedding(meta_des, self.embedding_weights, self.embedding_weights, self.embedding_weights, self.embedding_weights, self.embedding_weights, \"emb_cache\")\n\n def __call__(self,\n tokens,\n positions,\n first_pass):\n # positional_encode + embedding_lookup\n return self.embedding.forward_fairseq(tokens, positions, self.embed_scale, self.padding_idx, first_pass)\n\n @staticmethod\n def from_torch(args, meta_des, embedding_weights, data_type=torch.float32):\n feedforward = EETTransformerEmbedding(args, meta_des, embedding_weights, data_type)\n return feedforward\n\n\nclass EETTransformerFeedforward():\n def __init__(self, meta_des, model_dict, data_type=torch.float32, name=\"ffn_out_cache\"):\n\n self.intermediate_weights = torch.t([x[1] for x in model_dict.items() if 'fc1.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.intermediate_bias = [x[1] for x in model_dict.items() if 
'fc1.bias' in x[0]][0].cuda().type(data_type)\n self.output_weights = torch.t([x[1] for x in model_dict.items() if 'fc2.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.output_bias = [x[1] for x in model_dict.items() if 'fc2.bias' in x[0]][0].cuda().type(data_type)\n self.layernorm_weights = [x[1] for x in model_dict.items() if 'final_layer_norm.weight' in x[0]][0].cuda().type(data_type)\n self.layernorm_bias = [x[1] for x in model_dict.items() if 'final_layer_norm.bias' in x[0]][0].cuda().type(data_type)\n\n self.ffn = eet_ffn(meta_des, self.intermediate_weights, self.intermediate_bias, self.output_weights, self.output_bias, self.layernorm_weights, self.layernorm_bias, name)\n\n def __call__(self,\n input_id,\n pre_layernorm=True,\n add_residual=True):\n return self.ffn.forward(input_id, pre_layernorm, add_residual)\n\n @staticmethod\n def from_torch(meta_des, model_dict, data_type=torch.float32):\n feedforward = EETTransformerFeedforward(meta_des, model_dict, data_type=data_type)\n return feedforward\n\n\nclass EETTransformerAttention():\n def __init__(self, meta_des, model_dict, no_encoder_attn=False, data_type=torch.float32, is_encoder=False):\n self.is_encoder = is_encoder\n if is_encoder is False:\n if no_encoder_attn is True:\n q_weights = [x[1] for x in model_dict.items() if 'self_attn.q_proj.weight' in x[0]][0].contiguous().cuda().type(data_type)\n k_weights = [x[1] for x in model_dict.items() if 'self_attn.k_proj.weight' in x[0]][0].contiguous().cuda().type(data_type)\n v_weights = [x[1] for x in model_dict.items() if 'self_attn.v_proj.weight' in x[0]][0].contiguous().cuda().type(data_type)\n self.qkv_weight = torch.cat((q_weights, k_weights, v_weights), 0).transpose(0, 1).contiguous()\n self.q_bias = [x[1] for x in model_dict.items() if 'self_attn.q_proj.bias' in x[0]][0].cuda().type(data_type)\n self.k_bias = [x[1] for x in model_dict.items() if 'self_attn.k_proj.bias' in x[0]][0].cuda().type(data_type)\n self.v_bias = [x[1] for x in model_dict.items() if 'self_attn.v_proj.bias' in x[0]][0].cuda().type(data_type)\n self.out_weights = torch.t([x[1] for x in model_dict.items() if 'self_attn.out_proj.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.out_bias = [x[1] for x in model_dict.items() if 'self_attn.out_proj.bias' in x[0]][0].cuda().type(data_type)\n self.layernorm_weights = [x[1] for x in model_dict.items() if 'self_attn_layer_norm.weight' in x[0]][0].cuda().type(data_type)\n self.layernorm_bias = [x[1] for x in model_dict.items() if 'self_attn_layer_norm.bias' in x[0]][0].cuda().type(data_type)\n self.attention = eet_attention(meta_des, self.qkv_weight, self.q_bias, self.k_bias, self.v_bias, self.out_weights, self.out_bias, self.layernorm_weights, self.layernorm_bias)\n\n else:\n self.q_weights = torch.t([x[1] for x in model_dict.items() if 'encoder_attn.q_proj.weight' in x[0]][0]).contiguous().cuda()\n self.k_weights = torch.t([x[1] for x in model_dict.items() if 'encoder_attn.k_proj.weight' in x[0]][0]).contiguous().cuda()\n self.v_weights = torch.t([x[1] for x in model_dict.items() if 'encoder_attn.v_proj.weight' in x[0]][0]).contiguous().cuda()\n self.q_bias = [x[1] for x in model_dict.items() if 'encoder_attn.q_proj.bias' in x[0]][0].cuda()\n self.k_bias = [x[1] for x in model_dict.items() if 'encoder_attn.k_proj.bias' in x[0]][0].cuda()\n self.v_bias = [x[1] for x in model_dict.items() if 'encoder_attn.v_proj.bias' in x[0]][0].cuda()\n self.out_weights = torch.t([x[1] for x in model_dict.items() if 'encoder_attn.out_proj.weight' in 
x[0]][0].contiguous()).cuda()\n self.out_bias = [x[1] for x in model_dict.items() if 'encoder_attn.out_proj.bias' in x[0]][0].cuda()\n self.layernorm_weights = [x[1] for x in model_dict.items() if 'encoder_attn_layer_norm.weight' in x[0]][0].cuda()\n self.layernorm_bias = [x[1] for x in model_dict.items() if 'encoder_attn_layer_norm.bias' in x[0]][0].cuda()\n self.attention = eet_cross_attention(meta_des, self.q_weights, self.k_weights, self.v_weights, self.q_bias, self.k_bias,\n self.v_bias, self.out_weights, self.out_bias, self.layernorm_weights, self.layernorm_bias)\n else:\n # transformer encoder\n self.q_weights = torch.t([x[1] for x in model_dict.items() if 'self_attn.q_proj.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.k_weights = torch.t([x[1] for x in model_dict.items() if 'self_attn.k_proj.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.v_weights = torch.t([x[1] for x in model_dict.items() if 'self_attn.v_proj.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.q_bias = [x[1] for x in model_dict.items() if 'self_attn.q_proj.bias' in x[0]][0].cuda().type(data_type)\n self.k_bias = [x[1] for x in model_dict.items() if 'self_attn.k_proj.bias' in x[0]][0].cuda().type(data_type)\n self.v_bias = [x[1] for x in model_dict.items() if 'self_attn.v_proj.bias' in x[0]][0].cuda().type(data_type)\n self.out_weights = torch.t([x[1] for x in model_dict.items() if 'self_attn.out_proj.weight' in x[0]][0]).contiguous().cuda().type(data_type)\n self.out_bias = [x[1] for x in model_dict.items() if 'self_attn.out_proj.bias' in x[0]][0].cuda().type(data_type)\n self.layernorm_weights = [x[1] for x in model_dict.items() if 'self_attn_layer_norm.weight' in x[0]][0].cuda().type(data_type)\n self.layernorm_bias = [x[1] for x in model_dict.items() if 'self_attn_layer_norm.bias' in x[0]][0].cuda().type(data_type)\n self.attention = eet_encoder_attention(meta_des, self.q_weights, self.k_weights, self.v_weights, self.q_bias, self.k_bias,\n self.v_bias, self.out_weights, self.out_bias, self.layernorm_weights, self.layernorm_bias)\n\n def __call__(self,\n input_id,\n pre_padding_len,\n reorder_state=None,\n encoder_out=None,\n encoder_padding_mask=None,\n pre_layernorm=True,\n add_residual=True,\n first_pass=False):\n\n if self.is_encoder:\n return self.attention.forward(input_id, pre_padding_len, pre_layernorm, add_residual)\n else:\n if encoder_out is None:\n # self_atten\n return self.attention.forward(input_id, pre_padding_len, reorder_state, pre_layernorm, add_residual, first_pass)\n else:\n # cross_atten\n return self.attention.forward(input_id, encoder_out, pre_padding_len, pre_layernorm, add_residual, encoder_padding_mask, first_pass)\n\n @staticmethod\n def from_torch(meta_des, model_dict, no_encoder_attn=True, data_type=torch.float32, is_encoder=False):\n attention = EETTransformerAttention(meta_des, model_dict, no_encoder_attn, data_type=data_type, is_encoder=is_encoder)\n return attention\n\n\nclass EETTransformerDecoderLayer():\n def __init__(self, args, attention, feedforward, cross_attention=None, no_encoder_attn=False):\n self.args = args\n self.attetion = attention\n self.feedforward = feedforward\n if no_encoder_attn == False:\n self.cross_attention = cross_attention\n self.pre_layernorm = args.decoder_normalize_before\n\n self.add_residual = True\n\n def __call__(self,\n x,\n pre_padding_len=None,\n reorder_state=None,\n encoder_out=None,\n encoder_padding_mask=None,\n first_pass=False):\n if encoder_out is not None:\n ''' self_attn -> cross_attn 
-> ffn'''\n self_attn_out = self.attetion(input_id=x,\n pre_padding_len=pre_padding_len,\n reorder_state=reorder_state,\n pre_layernorm=self.pre_layernorm,\n add_residual=self.add_residual,\n first_pass=first_pass)\n cross_attn_out = self.cross_attention(input_id=self_attn_out,\n pre_padding_len=pre_padding_len,\n encoder_out=encoder_out,\n encoder_padding_mask=encoder_padding_mask,\n pre_layernorm=self.pre_layernorm,\n add_residual=self.add_residual,\n first_pass=first_pass)\n out = self.feedforward(cross_attn_out,\n pre_layernorm=self.pre_layernorm,\n add_residual=self.add_residual)\n else:\n ''' self_attn -> ffn'''\n self_attn_out = self.attetion(input_id=x,\n pre_padding_len=pre_padding_len,\n reorder_state=reorder_state,\n pre_layernorm=self.pre_layernorm,\n add_residual=self.add_residual,\n first_pass=first_pass)\n out = self.feedforward(self_attn_out,\n pre_layernorm=self.pre_layernorm,\n add_residual=self.add_residual)\n return out\n\n @staticmethod\n def from_torch(args, meta_des, model_dict, no_encoder_attn=False, data_type=torch.float32):\n attention = EETTransformerAttention.from_torch(meta_des=meta_des, model_dict=model_dict, data_type=data_type, is_encoder=False)\n feedforward = EETTransformerFeedforward.from_torch(meta_des=meta_des, model_dict=model_dict, data_type=data_type)\n if no_encoder_attn == False:\n cross_attention = EETTransformerAttention.from_torch(meta_des=meta_des, model_dict=model_dict, no_encoder_attn=no_encoder_attn, data_type=data_type, is_encoder=True)\n layer = EETTransformerDecoderLayer(args, attention, feedforward, cross_attention, no_encoder_attn)\n else:\n layer = EETTransformerDecoderLayer(args, attention, feedforward, no_encoder_attn)\n return layer\n\n\nclass EETTransformerDecoder(GenerationMixin_EET):\n \"\"\"\n EETTransformerDecoder consisting of layers. 
Each layer\n is a :class:`EETTransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n embedding(EETTransformerEmbedding) : class: 'EETTransformerEmbedding'\n DecoderLayers(EETTransformerDecoderLayer) class: 'EETTransformerDecoderLayer'\n layer_norm(EETTransformerLayerNorm) class: 'EETTransformerLayerNorm'\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, gpt2_config, max_batch, dictionary, embed_tokens, DecoderLayers, layer_norm):\n self.layers = DecoderLayers\n self.layer_norm = layer_norm\n self.cross_self_attention = False\n self.adaptive_softmax = None\n self.share_input_output_embed = args.share_decoder_input_output_embed\n self.output_embed_dim = args.decoder_output_dim\n self.embed_tokens = embed_tokens\n self.pre_padding_len = torch.empty(0).long()\n self.positions = torch.empty(0).long()\n # self.pre_padding_len = None\n self.reorder_state = torch.empty(0).long()\n self.config = gpt2_config\n self.max_target_positions = args.max_target_positions\n self.main_input_name = \"input_ids\"\n self.device = \"cuda:0\"\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(\n torch.Tensor(len(dictionary), self.output_embed_dim)\n )\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\n\n def prepare_inputs_for_generation(self, input_ids, first_pass=True, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if first_pass == False:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if first_pass == False:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n position_ids = position_ids.contiguous()\n else:\n position_ids = None\n input_ids = input_ids.contiguous()\n\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n def forward(\n self,\n input_ids,\n reorder_state=None,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n features_only: bool = False,\n first_pass=False\n ):\n \"\"\"\n Args:\n prev_output_tokens : (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n first_pass:full decoder or incremental decoder,first step is full 
decoder,other is incremental decoder.\n Returns:\n the decoder's output of shape `(batch, tgt_len, vocab)`\n \"\"\"\n if first_pass:\n mask = input_ids.ne(self.embed_tokens.padding_idx).int()\n positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.embed_tokens.padding_idx\n pre_padding_len = (input_ids.size(1) - torch.sum(mask, 1)).cuda()\n self.pre_padding_len = pre_padding_len.long().cuda()\n self.positions = positions.cuda()\n else:\n self.positions = self.pre_padding_len\n positions = self.pre_padding_len\n\n if reorder_state is not None:\n self.reorder_state = reorder_state.long()\n positions = torch.index_select(self.positions, dim=0, index=reorder_state)\n # pre_padding_len = torch.index_select(self.pre_padding_len, dim=0, index=reorder_state)\n x = self.embed_tokens(input_ids, positions, first_pass)\n\n if (encoder_out is not None and len(encoder_out[\"encoder_padding_mask\"]) > 0):\n encoder_padding_mask = encoder_out[\"encoder_padding_mask\"][0]\n else:\n encoder_padding_mask = None\n\n if (encoder_out is not None and len(encoder_out[\"encoder_out\"]) > 0):\n encoder_out = encoder_out[\"encoder_out\"][0]\n else:\n encoder_out = None\n\n # if reorder_state is not None:\n # self.reorder_state = reorder_state.long()\n\n for layer in self.layers:\n x = layer(x,\n pre_padding_len=self.pre_padding_len,\n reorder_state=self.reorder_state,\n encoder_out=encoder_out,\n encoder_padding_mask=encoder_padding_mask,\n first_pass=first_pass)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n if not features_only:\n x = self.output_layer(x)\n lm_logits = x\n return CausalLMOutputWithCrossAttentions(\n loss=None,\n logits=lm_logits,\n past_key_values=None,\n hidden_states=None,\n attentions=None,\n cross_attentions=None,\n )\n\n def __call__(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n first_pass=True,\n reorder_state=None,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n features_only: bool = False,\n self_past_key_values_length=0,\n ):\n return self.forward(\n input_ids=input_ids,\n first_pass=first_pass,\n reorder_state=reorder_state,\n encoder_out=encoder_out,\n features_only=features_only,\n )\n\n def output_layer(self, features):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n\n return F.linear(features, self.embed_out)\n else:\n return features\n\n def max_decoder_positions(self):\n if self.max_target_positions is not None:\n return self.max_target_positions\n else:\n return DEFAULT_MAX_TARGER_POSITIONS\n\n @staticmethod\n def from_pretrained(model_id_or_path: str, max_batch, full_seq_len, data_type, no_encoder_attn=True):\n \"\"\"from_pretrained.\"\"\"\n \"\"\"\n Args:\n model_id_or_path : pytorch model path\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n config:dic[max_full_seq_len,max_batch,data_type]\n { \n full_seq_len: The maximum length that can be supported by full decoding \n max_batch: the largest batch_size that can be supported, and it is supported if it is smaller than max_batch, so as to support dynamic batch\n data_type: data_type (default: torch.float32)\n }\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n 
Returns:\n eet_decoder : EETTransformerDecoder\n \"\"\"\n\n torch.set_grad_enabled(False)\n pretrained_dict = torch.load(model_id_or_path+'/checkpoint_best.pt')\n dictionary = Dictionary.load(model_id_or_path + '/dict.txt')\n if os.path.exists(model_id_or_path + '/config.json'):\n gpt2_config = GPT2Config.from_pretrained(model_id_or_path)\n else:\n gpt2_config = None\n full_seq_len = full_seq_len\n batch_size = max_batch\n # data_type = data_type\n\n model_dict = {}\n DecoderLayers = []\n for k, v in pretrained_dict['model'].items():\n model_dict[k] = v\n from itertools import groupby\n # Intercept k,num = length of 'decoder.layers.**'=17; If your weight name has changed please change it here\n model_dict_list = list(model_dict.items())\n model_dict_list.sort(key=lambda item: item[0][:FROM_TORCH_PARAM_LEN])\n layer_model_dict = {k: dict(v) for k, v in groupby(model_dict_list, lambda item: item[0][:FROM_TORCH_PARAM_LEN])}\n device = \"cuda:0\"\n args = pretrained_dict['args']\n activation_fn = args.activation_fn\n\n if args.max_target_positions is None:\n args.max_target_positions = DEFAULT_MAX_TARGER_POSITIONS\n meta_des = meta_desc(dtype=data_type,\n batch_size=batch_size,\n head_num=args.decoder_attention_heads,\n hidden_units=args.decoder_embed_dim,\n layer_num=args.decoder_layers,\n max_seq_len=args.max_target_positions,\n max_full_seq_len=full_seq_len,\n activation_fn=activation_fn,\n cuda_device=device)\n embedding = EETTransformerEmbedding.from_torch(args, meta_des, model_dict['decoder.embed_tokens.weight'], data_type)\n\n if args.decoder_normalize_before:\n layer_norm = EETTransformerLayerNorm.from_torch(args, meta_des, model_dict['decoder.layer_norm.weight'], model_dict['decoder.layer_norm.bias'], data_type)\n else:\n layer_norm = None\n for i in range(args.decoder_layers):\n if i < 10:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['decoder.layers.'+str(i)+'.'], no_encoder_attn, data_type)\n ]\n )\n else:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['decoder.layers.'+str(i)], no_encoder_attn, data_type)\n ]\n )\n\n eet_decoder = EETTransformerDecoder(args, gpt2_config, batch_size, dictionary, embedding, DecoderLayers, layer_norm)\n\n return eet_decoder\n\n @staticmethod\n def from_torch(torch_decoder, dictionary, args, config: dict, no_encoder_attn=False):\n \"\"\"from torch.\"\"\"\n \"\"\"\n Args:\n torch_decoder : TransformerDecoder buffer\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n args (argparse.Namespace): parsed command-line arguments\n config:dic[full_seq_len,max_batch,data_type]\n { \n full_seq_len: The maximum length that can be generated \n max_batch: the largest batch_size that can be supported, and it is supported if it is smaller than max_batch, so as to support dynamic batch\n data_type: data_type (default: torch.float32)\n }\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n Returns:\n eet_decoder : EETTransformerDecoder\n \"\"\"\n\n torch.set_grad_enabled(False)\n # pretrained_dict = torch.load(model_id_or_path)\n\n full_seq_len = config['full_seq_len']\n batch_size = config['max_batch']\n data_type = config['data_type']\n\n model_dict = {}\n DecoderLayers = []\n\n for k, v in dict(torch_decoder.state_dict()).items():\n model_dict[k] = v\n from itertools import groupby\n # Intercept k,num = length of 'decoder.layers.**'=17; If your weight name has changed please change it here\n model_dict_list 
= list(model_dict.items())\n model_dict_list.sort(key=lambda item: item[0][:FROM_TORCH_PARAM_LEN])\n layer_model_dict = {k: dict(v) for k, v in groupby(model_dict_list, lambda item: item[0][:FROM_BUFFER_PARAM_LEN])}\n\n device = \"cuda:0\"\n activation_fn = args.activation_fn\n if args.max_target_positions is None:\n args.max_target_positions = DEFAULT_MAX_TARGER_POSITIONS\n\n meta_des = meta_desc(dtype=data_type,\n batch_size=batch_size,\n head_num=args.decoder_attention_heads,\n hidden_units=args.decoder_embed_dim,\n layer_num=args.decoder_layers,\n max_seq_len=args.max_target_positions,\n max_full_seq_len=full_seq_len,\n activation_fn=activation_fn,\n cuda_device=device)\n embedding = EETTransformerEmbedding.from_torch(args, meta_des, model_dict['embed_tokens.weight'], data_type)\n\n if args.decoder_normalize_before:\n layer_norm = EETTransformerLayerNorm.from_torch(args, meta_des, model_dict['layer_norm.weight'], model_dict['layer_norm.bias'], data_type)\n else:\n layer_norm = None\n for i in range(args.decoder_layers):\n if i < 10:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['layers.'+str(i)+'.'], no_encoder_attn, data_type)\n ]\n )\n else:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['layers.'+str(i)], no_encoder_attn, data_type)\n ]\n )\n gpt2_config = None\n eet_decoder = EETTransformerDecoder(args, gpt2_config, batch_size, dictionary, embedding, DecoderLayers, layer_norm)\n\n return eet_decoder\n\n @staticmethod\n def from_fairseq_pretrained(model_id_or_path: str, dictionary, args, config: dict, no_encoder_attn=False, device=\"cuda:0\"):\n \"\"\"from_pretrained.\"\"\"\n \"\"\"\n Args:\n model_id_or_path : pytorch model path\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n config:dic[max_full_seq_len,max_batch,data_type]\n { \n full_seq_len: The maximum length that can be supported by full decoding \n max_batch: the largest batch_size that can be supported, and it is supported if it is smaller than max_batch, so as to support dynamic batch\n data_type: data_type (default: torch.float32)\n }\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n Returns:\n eet_decoder : EETTransformerDecoder\n \"\"\"\n\n torch.set_grad_enabled(False)\n pretrained_dict = torch.load(model_id_or_path)\n gpt2_config = None\n full_seq_len = config['full_seq_len']\n batch_size = config['max_batch']\n data_type = config['data_type']\n\n model_dict = {}\n DecoderLayers = []\n for k, v in pretrained_dict['model'].items():\n model_dict[k] = v\n from itertools import groupby\n # Intercept k,num = length of 'decoder.layers.**'=17; If your weight name has changed please change it here\n model_dict_list = list(model_dict.items())\n model_dict_list.sort(key=lambda item: item[0][:FROM_TORCH_PARAM_LEN])\n layer_model_dict = {k: dict(v) for k, v in groupby(model_dict_list, lambda item: item[0][:FROM_TORCH_PARAM_LEN])}\n\n device = device\n activation_fn = args.activation_fn\n\n if args.max_target_positions is None:\n args.max_target_positions = DEFAULT_MAX_TARGER_POSITIONS\n meta_des = meta_desc(dtype=data_type,\n batch_size=batch_size,\n head_num=args.decoder_attention_heads,\n hidden_units=args.decoder_embed_dim,\n layer_num=args.decoder_layers,\n max_seq_len=args.max_target_positions,\n max_full_seq_len=full_seq_len,\n activation_fn=activation_fn,\n cuda_device=device)\n embedding = EETTransformerEmbedding.from_torch(args, meta_des, 
model_dict['decoder.embed_tokens.weight'], data_type)\n\n if args.decoder_normalize_before:\n layer_norm = EETTransformerLayerNorm.from_torch(args, meta_des, model_dict['decoder.layer_norm.weight'], model_dict['decoder.layer_norm.bias'], data_type)\n else:\n layer_norm = None\n for i in range(args.decoder_layers):\n if i < 10:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['decoder.layers.'+str(i)+'.'], no_encoder_attn, data_type)\n ]\n )\n else:\n DecoderLayers.extend(\n [\n EETTransformerDecoderLayer.from_torch(args, meta_des, layer_model_dict['decoder.layers.'+str(i)], no_encoder_attn, data_type)\n ]\n )\n\n eet_decoder = EETTransformerDecoder(args, gpt2_config, batch_size, dictionary, embedding, DecoderLayers, layer_norm)\n return eet_decoder\n","repo_name":"NetEase-FuXi/EET","sub_path":"python/eet/fairseq/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":33312,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"2"} +{"seq_id":"41628288371","text":"#!/usr/bin/python3\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nimport numpy as np\n\n# Initialize figure with 4 3D subplots\nfig = make_subplots(\n rows=2, cols=2,\n specs=[[{'type': 'surface'}, {'type': 'surface'}],\n [{'type': 'surface'}, {'type': 'surface'}]])\n\n\nx=np.linspace(-50, 50, 100)\ny=np.linspace(-50, 50, 100)\n\n\nxGrid, yGrid = np.meshgrid(y, x)\n\n\nz = -2 * (xGrid ** 6) + 4 * (yGrid ** 8) - 4 * (yGrid ** 6)\n\n\nfig.add_trace(\n go.Surface(x=x, y=y, z=z, colorscale='phase', showscale=False),\n row=1, col=1)\n\n\nz1 = xGrid ** 2 + yGrid ** 4\n\n# adding surfaces to subplots.\n\n\n\nfig.add_trace(\n go.Surface(x=x, y=y, z=z1, colorscale='turbo', showscale=False),\n row=1, col=2)\n\n\n\nfig.update_layout( \n scene1 = dict(\n xaxis = dict(nticks=4, range=[-25,25],),\n yaxis = dict(nticks=4, range=[-25,25],),\n zaxis = dict(nticks=4, range=[-20,20],),),\n title_text='3D subplots with different colorscales',\n height=2000,\n width=2000\n)\n\n\n\n\nfig.update_layout( \n scene2 = dict(\n xaxis = dict(nticks=4, range=[-2,2.5],),\n yaxis = dict(nticks=4, range=[-2,2.5],),\n zaxis = dict(nticks=4, range=[-5,25],),),\n title_text='3D subplots with different colorscales',\n\n)\n\n\nfig.show()","repo_name":"uslsteen/4_sem","sub_path":"Theormech_problem/subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3512201479","text":"def myHash0(value):\n return len(value)\n\n\nbobAddress = myHash0('bob')\ndadAddress = myHash0('dad')\nahmedAddress = myHash0('ahmed')\n\nprint(bobAddress)\n\n# -------------------------------\n\n# solution 1\nmyArray = [None] * 8\n\nmyArray[bobAddress] = 'bob'\nmyArray[ahmedAddress] = 'ahmed'\n\nprint(myArray[bobAddress])\n\nprint(myArray[myHash0('bob')])\n\n# this is fast but not unique at all NAME COLLISION\n# dad and bob both has 3 letters so location both would be 3 so bad bad bad\n# this is non-invertable it can't go backwards and it's pretty secure\n\n# --------------------------------\n\n# solution 2\ndef myHash1(key):\n stringIndex = 0\n for char in key:\n if char == 'a':\n stringIndex += 1\n\n if char == 'b':\n stringIndex += 2\n\n return stringIndex\n\n# this is also a better but slower way\n# no name collision dad and bob give different numbers\n# dad then goes to index of 9 but it is now kind of invertable 
it lowers the list of potential words to reverse engineer \n\ndef myHash3(key,scrambleNumber):\n stringIndex = 0\n scrambleNumber = 17\n for char in key:\n if char == 'a':\n stringIndex += 1\n stringIndex *= scrambleNumber\n\n if char == 'b':\n stringIndex += 2\n stringIndex *= scrambleNumber\n\n return stringIndex","repo_name":"dlimla/Hash-Tables","sub_path":"src/classWork.py","file_name":"classWork.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"11512014179","text":"import re\nimport requests\nfrom datetime import datetime, timedelta\n\nimport os\nimport copy\nimport json\n\nfrom utils import clone_repo\n\nQUERY = \"\"\"\n{\n search(query: \"is:public mirror:false archived:false created:DATE_FROM..DATE_TO\", type: REPOSITORY, first: 50 AFTER) {\n repositoryCount\n pageInfo {\n endCursor\n startCursor\n hasNextPage\n }\n edges {\n node {\n ... on Repository {\n id\n owner { login }\n name\n url\n createdAt\n description\n primaryLanguage { name }\n object(expression: \"master:\") {\n ... on Tree {\n entries {\n name\n type\n }\n }\n }\n }\n }\n }\n }\n rateLimit {\n limit\n cost\n remaining\n resetAt\n }\n}\n\"\"\"\n\n\nclass GithubMiner():\n\n def __init__(self,\n token, \n date_from: datetime, \n date_to: datetime\n ):\n\n self.date_from = date_from.strftime('%Y-%m-%dT%H:%M:%SZ') \n self.date_to = date_to.strftime('%Y-%m-%dT%H:%M:%SZ')\n\n self.quota = 0\n self.quota_reset_at = None\n\n self.query = re.sub('DATE_FROM', str(self.date_from), QUERY) \n self.query = re.sub('DATE_TO', str(self.date_to), self.query) \n\n def run_query(self, query): \n \"\"\"\n Run a graphql query \n \"\"\"\n request = requests.post('https://api.github.com/graphql', json={'query': query}, headers={'Authorization': token})\n \n if request.status_code == 200:\n return request.json()\n \n else:\n print(\"Query failed to run by returning code of {}. 
{}\".format(request.status_code, query))\n \n with open(\"logs/failed_queries.txt\", \"a+\") as file:\n file.write(f'{self.date_from} {self.date_to} \\n')\n\n return None\n\n def filter_repositories(self, edges):\n\n for node in edges:\n \n node = node.get('node')\n\n if not node:\n continue\n\n object = node.get('object')\n if not object:\n continue\n \n dirs = [entry.get('name') for entry in object.get('entries', []) if entry.get('type') == 'tree']\n\n yield dict(\n id=node.get('id'),\n default_branch=node.get('defaultBranchRef', {}).get('name'),\n owner=node.get('owner', {}).get('login', ''),\n name=node.get('name', ''),\n url=node.get('url'),\n description=node['description'] if node['description'] else '',\n dirs=dirs,\n createdAt=node.get('createdAt', 'No date found')\n )\n\n def mine(self):\n \n has_next_page = True\n end_cursor = None\n\n while has_next_page:\n \n tmp_query = re.sub('AFTER', '', self.query) if not end_cursor else re.sub('AFTER', f', after: \"{end_cursor}\"', self.query)\n \n result = self.run_query(tmp_query)\n\n if not result:\n break\n \n if not result.get('data'):\n break\n\n if not result['data'].get('search'):\n break\n \n self.quota = int(result['data']['rateLimit']['remaining'])\n self.quota_reset_at = result['data']['rateLimit']['resetAt']\n\n has_next_page = bool(result['data']['search']['pageInfo'].get('hasNextPage'))\n end_cursor = str(result['data']['search']['pageInfo'].get('endCursor'))\n\n edges = result['data']['search'].get('edges', [])\n\n for repo in self.filter_repositories(edges):\n yield repo\n\n\ndef main(token, date_from, date_to):\n \n github_miner = GithubMiner(\n token=token,\n date_from=date_from,\n date_to=date_to\n )\n \n i = 0 \n \n for repo in github_miner.mine():\n \n i += 1\n\n if (re.search(r\"\\btosca\\b\", repo['description'].lower()) or re.search(r\"\\btosca\\b\", repo['owner'].lower()) or re.search(r\"\\btosca\\b\", repo['name'].lower())):\n clone_repo(repo['owner'], repo['name'])\n\n else:\n continue\n \n \n print(f'{i} repositories mined')\n print(f'Quota: {github_miner.quota}')\n print(f'Quota will reset at: {github_miner.quota_reset_at}')\n print('---------------')\n\n\n with open(\"logs/executed_queries.txt\", \"a+\") as file:\n file.write(f'mined: {i} from: {date_from} to: {date_to} \\n')\n\n\nif __name__=='__main__':\n date_from = datetime.strptime('2014-03-27 00:00:00', '%Y-%m-%d %H:%M:%S')\n date_to = datetime.strptime('2014-03-27 12:00:00', '%Y-%m-%d %H:%M:%S')\n now = datetime.strptime('2020-03-31 00:00:00', '%Y-%m-%d %H:%M:%S')\n\n while date_to <= now:\n print(f'Searching for: {date_from}..{date_to}. 
Analysis started at {str(datetime.now())}')\n main(date_from, date_to)\n date_from = date_to\n date_to += timedelta(hours=12)\n","repo_name":"VaCH2/tosca-analysis","sub_path":"dataminer/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"14323976089","text":"import time\nfrom enum import Enum\n\nfrom lxml import html\nfrom selenium import webdriver\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nTELETAL_URL = \"https://www.teletal.hu/etlap/45\"\nPREFIX = \"//div[contains(@class,'menu-card-5-day')]/div[contains(@class,'menu-cell-text')]\"\n# translate is needed toi have lowercase input\nCSIRKEMELL = \"[contains(translate(normalize-space(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'),\" \\\n \"'csirkemell')]/ \"\n_100MS = 0.1\nSCROLL_STEPS = 17\nHIGH_PRICE = 999999\n\n\nclass Workdays(Enum):\n HÉTFŐ = 1\n KEDD = 2\n SZERDA = 3\n CSÜTÖRTÖK = 4\n PÉNTEK = 5\n\n\ndef get_browser():\n # get the webdriver in headless mode\n options = Options()\n options.headless = True\n service = Service(ChromeDriverManager().install())\n chrome = webdriver.Chrome(service=service, options=options)\n chrome.get(TELETAL_URL)\n return chrome\n\n\ndef scroll_down_to_end():\n # scrolls down to the end of the page\n for i in range(SCROLL_STEPS):\n time.sleep(_100MS)\n body.send_keys(Keys.PAGE_DOWN)\n\n\ndef extract_data():\n # extract days, meals and prices which contains csirkemell\n days = document.xpath(PREFIX + \"/div\" + CSIRKEMELL + \"following-sibling::div/a/@nap\")\n ingredients = document.xpath(PREFIX + CSIRKEMELL + \"div/text()\")\n prices = document.xpath(\n PREFIX + CSIRKEMELL + \"child::div[contains(@class,'menu-price-field')]/div/h6/strong/text()\")\n return days, ingredients, prices\n\n\ndef print_minimums(days, ingredients, prices):\n # count when the length are the same for days, ingredients annd prices\n if len(days) == len(ingredients) and len(ingredients) == len(prices):\n # initialize the output\n cheapest_csirkmell_at_weekdays = {day.name: ['Nincs', HIGH_PRICE] for day in Workdays}\n # remove the . and Ft from prices to be able to convert as integer\n meals = ((Workdays(int(day)).name, ingredient, price.replace('.', '').replace(' Ft', '')) for\n day, ingredient, price\n in\n zip(days, ingredients, prices))\n\n # Add cheaper meal to each day\n for meal in meals:\n # replace the initial data if the actual price is less\n if cheapest_csirkmell_at_weekdays[meal[0]][1] > int(meal[2]):\n cheapest_csirkmell_at_weekdays[meal[0]][0] = meal[1]\n cheapest_csirkmell_at_weekdays[meal[0]][1] = int(meal[2])\n\n print(cheapest_csirkmell_at_weekdays)\n else:\n raise Exception('Cannot print cheapest for all day',\n 'different length in days, ingredients, prices lists: ' + str(len(days)) + ', ' + str(len(\n ingredients)) + ', ' + str(len(prices)))\n\n\n# fetch the whole html\nbrowser = get_browser()\nbody = browser.find_element(By.TAG_NAME, 'html')\nscroll_down_to_end()\n\n# get the whole html\nhtml_page = browser.page_source\ntime.sleep(2)\n# replacing
is required to avoid counting multiple times the csirkemell text in case of ingredients\ndocument = html.fromstring(html_page.replace('
', ''))\n\n# get data from html and print output\ndays, ingredients, prices = extract_data()\nprint_minimums(days, ingredients, prices)\n\nbrowser.quit()\n","repo_name":"gyapeee/LearnPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12716883829","text":"\"\"\"\r\nNihal Wadhwa\r\nHomework 08: Linear Sort\r\nA visual of the complexities of the quick, merge, bumper, and linear sort compared to each other\r\n\"\"\"\r\n\r\nimport random\r\nimport time\r\nfrom merge_sort import merge_sort\r\nfrom quick_sort import quick_sort\r\n\r\nMAX_NUM = 300\r\n\r\n\r\n\r\ndef bumper_sort(data,k):\r\n \"\"\"\r\n Takes histogram of the original list and converts it back into a sorted order in a different list\r\n \"\"\"\r\n hist = [0] * (k+1)\r\n for v in data:\r\n hist[v] = hist[v]+1\r\n result = []\r\n for i in range(0,len(hist)):\r\n j = hist[i]\r\n while j >0:\r\n result.append(i)\r\n j = j-1\r\n return result\r\n\r\n\r\ndef warmup_test():\r\n \"\"\"\r\n Tests the bumper_sort function\r\n \"\"\"\r\n test = [2,5,3,0,2,3,0,3]\r\n print(\"Small List, unsorted:\", test)\r\n print(\"Small List, bump-sorted:\",bumper_sort(test,5))\r\n test_2=[]\r\n for n in range(0,1000):\r\n test_2.append(random.randint(0,MAX_NUM))\r\n print(\"Large List, unsorted:\",test_2)\r\n print(\"Large List, bump-sorted:\", bumper_sort(test_2,MAX_NUM),\"\\n\")\r\n\r\n\r\ndef perfcompare_test():\r\n \"\"\"\r\n Compares the time it takes sorting two different lists (one small, one large) using four different sorts for each list.\r\n \"\"\"\r\n small = []\r\n for n in range(0,1000):\r\n small.append(random.randint(0,MAX_NUM))\r\n \r\n big = []\r\n for n in range(0,1000000):\r\n big.append(random.randint(0,MAX_NUM))\r\n \r\n print(\"Sorting a randomized list of 1000 elements.\")\r\n merge_small_1 = time.time()\r\n merge_sort(small)\r\n merge_small_2 = time.time()\r\n print(\"merge_sort time:\",merge_small_2 - merge_small_1, \"seconds\")\r\n \r\n quick_small_1 = time.time()\r\n quick_sort(small)\r\n quick_small_2 = time.time()\r\n print(\"quick_sort time:\", quick_small_2 - quick_small_1, \"seconds\")\r\n \r\n bumper_small_1 = time.time()\r\n bumper_sort(small,MAX_NUM)\r\n bumper_small_2 = time.time()\r\n print(\"bumper_sort time:\", bumper_small_2 - bumper_small_1, \"seconds\")\r\n \r\n sort_small_1 = time.time()\r\n small.sort()\r\n sort_small_2 = time.time()\r\n print(\"sorted time:\", sort_small_2 - sort_small_1, \"seconds\", \"\\n\")\r\n\r\n print(\"Sorting a randomized list of 1000000\")\r\n merge_big_1 = time.time()\r\n merge_sort(big)\r\n merge_big_2 = time.time()\r\n print(\"merge_sort time:\", merge_big_2 - merge_big_1, \"seconds\")\r\n \r\n quick_big_1 = time.time()\r\n quick_sort(big)\r\n quick_big_2 = time.time()\r\n print(\"quick_sort time:\", quick_big_2 - quick_big_1, \"seconds\")\r\n \r\n bump_big_1 = time.time()\r\n bumper_sort(big,MAX_NUM)\r\n bump_big_2 = time.time()\r\n print(\"bumper_sort time:\", bump_big_2 - bump_big_1, \"seconds\")\r\n \r\n sort_big_1 = time.time()\r\n big.sort()\r\n sort_big_2 = time.time()\r\n print(\"sorted time:\", sort_big_2 - sort_big_1, \"seconds\")\r\n\r\n\r\n\r\ndef main():\r\n bumper_sort([2,5,3,0,2,3,0,3],5)\r\n warmup_test()\r\n perfcompare_test()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n 
\r\n","repo_name":"nihal-wadhwa/Computer-Science-1","sub_path":"Homeworks/HW8/bumper_sort.py","file_name":"bumper_sort.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8262370345","text":"\"\"\"Collection of type aliases used in the project.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import (TYPE_CHECKING, Any, Dict, List, NewType, Optional, Tuple,\n TypedDict, Union)\n\nfrom . import Q_\n\nif TYPE_CHECKING:\n from .management.management_strategy import ManagementStrategy\n\n\nQuantity = NewType(\"Q_\", Q_) # type: ignore\n\"\"\"Pint Quantity type. Displayed as ``Q_``.\"\"\"\n\n\nTreeSoilInterface = Dict[str, Dict[str, float]]\n\"\"\"Interface between a tree and a soil model.\n\nShape: {tree_pool_from: {soil_pool_to_1: frac1}, ...}. See also values of\n:attr:`~.simulation_parameters.tree_soil_interfaces`.\n\"\"\"\n\n\nWoodProductInterface = Dict[str, Any]\n\"\"\"Interface between a tree and a wood product model.\n\nSee values of :attr:`~.simulation_parameters.wood_product_interfaces`.\n\"\"\"\n\n\nInputFluxes = Dict[str, Quantity]\n\"\"\"Fluxes into pools. Key is pool name.\"\"\"\n\n\nOutputFluxesInt = Dict[int, Quantity]\n\"\"\"Fluxes out of pools. Key is pool number not name.\"\"\"\n\n\nTreeExternalOutputFluxes = Dict[Tuple[str, Union[str, None]], Quantity]\n\"\"\"External output fluxes from a tree.\n\nExternal output fluxes from\n:class:`.trees.single_tree_C_model.SingleTreeCModel`.\nAccording to :obj:`~.type_aliases.TreeSoilInterface`,\n:class:`~.stand.Stand` distributes them to the atmosphere or\nto the soil or the atmosphere (pool_to=None).\n\"\"\"\n\n\nCuttingFluxes = Dict[Tuple[str, str], Quantity]\n\"\"\"Distribution of tree material at cutting or thinning.\n\n- (pool_from, pool_to): fraction, pool_to = None means output to atmosphere\n\"\"\"\n\n\nOneSpeciesParams = Dict[str, Any]\n\"\"\"Parameter dictionary for one species.\nExamples in :mod:`~trees.single_tree_params`.\n\"\"\"\n\n\nSpeciesParams = Dict[str, OneSpeciesParams]\n\"\"\"Parameter dictionary for all involved tree species.\n\nExample in :mod:`~.trees.single_tree_params`.\n\"\"\"\n\n\nMSData = Tuple[str, str]\n\"\"\"The tuple is a (Trigger, Action) pair for a MeanTree to plant.\n\nThe strings must be found in :mod:`~.management.library.py`.\n\"\"\"\n\n\nMSDataList = List[MSData]\n\"\"\"A list of management_strategy_data for a MeanTree to plant.\"\"\"\n\n\nSimulationProfile = List[Tuple[str, float, float, MSDataList, Optional[str]]]\n\"\"\"Simulation described by a list of trees.\n\nList[Tuple[tree_species, dbh_in_cm, N_in_m2-1, management_strategy_data, waiting or not]].\n'waiting' means the MeanTree is not immediately planted at the beginning of the simulation.\nSo it needs some kind of (trigger, plant_action) in the management_strategy_data.\nIt is optional, if omitted, the tree will be planted right away.\n\"\"\"\n\n\nTreeSetting = Tuple[int, Q_, Q_, \"ManagementStrategy\", Optional[str]]\n\"\"\"A setting of a MeanTree to be planted in the :class:`~.stand.Stand`.\n[nr_of_the_tree, dbh_in_cm, N_in_m2-1, management_strategy, 'waiting' or omitted].\n\"\"\"\n\n\nSpeciesSettings = Dict[str, List[TreeSetting]]\n\"\"\"The species setting of a new :class:`~.stand.Stand`.\n\nThe key is supposed to give the tree 
species.\n\"\"\"\n","repo_name":"goujou/BFCPM","sub_path":"src/BFCPM/type_aliases.py","file_name":"type_aliases.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34197838657","text":"#\r\n# @lc app=leetcode.cn id=77 lang=python3\r\n#\r\n# [77] 组合\r\n#\r\n\r\n# @lc code=start\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n def combine(self, n: int, k: int) -> List[List[int]]:\r\n nums = list(range(1, n + 1))\r\n\r\n res = []\r\n\r\n def backtrack(nums, tmp):\r\n if len(tmp) == k:\r\n res.append(tmp[:])\r\n return\r\n\r\n for i in range(len(nums)):\r\n tmp.append(nums[i])\r\n backtrack(nums[i + 1:], tmp)\r\n tmp.pop()\r\n\r\n backtrack(nums, [])\r\n return res\r\n\r\n # init first combination\r\n # nums = list(range(1, k + 1)) + [n + 1]\r\n\r\n # output, j = [], 0\r\n # while j < k:\r\n # # add current combination\r\n # output.append(nums[:k])\r\n # # increase first nums[j] by one\r\n # # if nums[j] + 1 != nums[j + 1]\r\n # j = 0\r\n # while j < k and nums[j + 1] == nums[j] + 1:\r\n # nums[j] = j + 1\r\n # j += 1\r\n # nums[j] += 1\r\n\r\n # return output\r\n\r\n\r\n# @lc code=end\r\n","repo_name":"lost-person/Leetcode","sub_path":"77.组合.py","file_name":"77.组合.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5484132963","text":"'''\nwhat is required for making mac and cheese\n\nAuthor: Noah Fedosoff\nID: 200420350\nEmail: fedo0350@mylaurier.ca\n__updated__ = \"2020-09-24\"\n\n'''\n# Constants\nmilk = 4\nbutter = 8\nflour = 0.5\nsalt = 2\n\n# Inputs\nservings_of_mac = int(input(\"Enter servings of Mac & Cheese:\"))\n\n# Calculations\nreal_serving = 6/servings_of_mac\nreal_milk = milk/real_serving\nreal_butter = butter/real_serving\nreal_flour = flour/real_serving\nreal_salt = salt/real_serving\n\n# Outputs\nprint(\"{0} servings of Mac & Cheese:\".format(servings_of_mac))\nprint(\"milk (cups): {0:.2f}\".format(real_milk))\nprint(\"butter (tablespoons): {0:.2f}\".format(real_butter))\nprint(\"flour (cups): {0}\".format(real_flour))\nprint(\"salt (teaspoons): {0:.2f}\".format(real_salt))\n","repo_name":"Minime998/uni-code","sub_path":"cp104/fedo0350_l2/src/t14.py","file_name":"t14.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17182902615","text":"def distribute_sweets(employees, k):\n total_sweets = sum(employees)\n if total_sweets % k != 0:\n return \"Packaging is not possible!\"\n\n sweets_per_pack = total_sweets // k\n packs = [[] for _ in range(k)]\n\n def backtrack(index):\n if index == len(employees):\n return True\n\n for i in range(k):\n if sum(packs[i]) + employees[index] <= sweets_per_pack:\n packs[i].append(employees[index])\n if backtrack(index + 1):\n return True\n packs[i].pop()\n return False\n\n if backtrack(0):\n return packs\n else:\n return \"Packaging is not possible!\"\n\n\nemployees = list(map(int, input().split(\", \")))\nk = int(input())\n\nresult = distribute_sweets(employees, k)\n\nif result == \"Packaging is not possible!\":\n print(result)\nelse:\n for i, pack in enumerate(result):\n print(f\"Package {i + 1}: {', '.join(map(str, 
pack))}\")","repo_name":"Warw1ck/algorithm_exercises","sub_path":"game_algorithms/pack_up_candy.py","file_name":"pack_up_candy.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"22488343886","text":"from typing import Union\nimport os\nimport discord\n\nfrom datetime import datetime, timedelta\n\nimport config as CONF\n\n# Fucntions\ndef match(raw,words):\n return any([i in raw.lower() for i in words])\n\nclient = discord.Bot(intents=discord.Intents.all())\n\n@client.event\nasync def on_ready():\n print(\"Bot Online\")\n\n@client.event\nasync def on_member_join(member):\n await member.send(f'Welcome {member.mention}!\\nPlease Identify yourself in #introductions before you get access to the rest of the server')\n\n@client.event\nasync def on_message(message):\n if message.channel.id == 947127564491686018 or message.author == client.user:\n return\n if message.content.startswith('|clear') and message.author.id in CONF.ADMIN_IDs:\n limit = int(message.content.split(' ')[1]) if len(message.content.split(' ')) > 1 else 10\n await message.channel.purge(limit=limit)\n await message.channel.send(f\"Cleared {limit} messages\", delete_after=3)\n if message.content.startswith('|reverify') and message.author.id in CONF.ADMIN_IDs:\n for member in message.guild.members:\n if 946341272015237150 not in [r.id for r in member.roles]:\n try:\n await member.send(f'Welcome {member.mention}!\\nPlease Identify yourself in #introductions before you get access to the rest of the server')\n except Exception as e:\n print(f\"Failed to send welcome message to {member.name}\")\n \n elif match(message.content,['attire']):\n today = datetime.today().weekday()\n tmr = today+1 if (today+1)<=6 else 0\n embed = discord.Embed(\n title = 'Attire',\n description = f\"**Today's Attire**:\\n{CONF.attire[today]}\\n\\n**Tomorrow's Attire**:\\n{CONF.attire[tmr]}\",\n colour = discord.Colour(0xE66E6B)\n )\n await message.channel.send(embed=embed)\n \n@client.event\nasync def on_raw_reaction_add(payload):\n emoji = payload.emoji.name\n member = payload.member\n channel_ = await member.guild.fetch_channel(payload.channel_id)\n message = await channel_.fetch_message(payload.message_id)\n if len(message.embeds) > 0:\n cache = message.embeds[0]\n if \"Managed by 22S209 Utility\" in cache.footer.text and len(cache.fields) == 1:\n opt_in = cache.fields[0].value.split(\"\\n\")\n if emoji == \"✅\" and str(member) not in opt_in:\n opt_in.append(str(member))\n if len(opt_in) > 1 and 'None' in opt_in:\n opt_in.remove('None')\n \n cache.remove_field(0)\n cache.add_field(name=\"Opt In\", value=\"\\n\".join(opt_in))\n await message.edit(embed=cache)\n\n elif emoji == \"❌\" and str(member) in opt_in:\n opt_in.remove(str(member))\n if len(opt_in) ==0 :\n opt_in = ['None']\n\n cache.remove_field(0)\n cache.add_field(name=\"Opt In\", value=\"\\n\".join(opt_in))\n await message.edit(embed=cache)\n\ndef timestamps(key=None):\n stamps = {\n '10 Minutes': datetime.now()+timedelta(minutes=10),\n '30 Minutes' : datetime.now()+timedelta(minutes=30),\n '1 Hour' : datetime.now()+timedelta(hours=1),\n '3 Hours' : datetime.now()+timedelta(hours=3),\n '6 Hours' : datetime.now()+timedelta(hours=6),\n '24 Hours' : datetime.now()+timedelta(days=1),\n 'Tomorrow Morning (8:00)': (datetime.now()+timedelta(days=1)).replace(hour=8),\n }\n if key is None:\n return stamps\n return int(stamps[key].timestamp()+28800)\n\n@client.slash_command(\n 
guild_ids=[946223026238783599],\n name='purchase',\n description='Create Purchase Menu',\n)\nasync def purchase(\n ctx: discord.ApplicationContext,\n item_name: discord.commands.Option(str, 'Name of Item', required=True),\n item_desc: discord.commands.Option(str, 'Description of Item', required=True),\n item_cost: discord.commands.Option(float, 'Cost of Item', required=True),\n expiry: discord.commands.Option(str, 'Expiry of this Menu', choices=timestamps().keys(), required=True)\n):\n item_cost = \"{:.2f}\".format(item_cost)\n embed = discord.Embed(\n title = f'Purchase of {item_name}',\n description = f\"**Item Description**: {item_desc}\\n**Item Cost**: **${item_cost}**\\n**Expiry**: \\n\\nDo React Below so that the representative can help you purchase this material on your behalf. Collection and Payment will be settled later.\",\n colour = discord.Colour(0xE66E6B)\n ).set_footer(text = f'Managed by 22S209 Utility | Created by {ctx.author}')\n embed.add_field(name=\"Opt In\", value='None')\n message = await ctx.send(embed=embed)\n await ctx.respond('Completed Without Errors\\n*Only you can see this*',ephemeral=True)\n await message.add_reaction('✅')\n await message.add_reaction('❌')\n\nclient.run(os.environ['token'])\n","repo_name":"TheReaper62/22S209-Utility","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29140821581","text":"from collections import deque\ndx = [1,-1,0,0]\ndy = [0,0,1,-1]\ndq = deque([])\nrow, col = map(int,input().split())\ncheezes = []\nfreezedCheeze = 0\ncount = 0\nmelted = 0\nfor i in range(row):\n line = list(map(int, input().split()))\n for j in range(col):\n if line[j]==1 : \n freezedCheeze += 1\n cheezes.append(line)\n\ndef bfs(r,c):\n meltedCheeze = 0\n visited = [[False] * col for _ in range(row)]\n dq.append((r,c))\n visited[r][c] = True\n while dq:\n r,c = dq.popleft()\n for i in range(4):\n nr = r + dx[i]\n nc = c + dy[i]\n if 0<=nr 0:\n self.is_playing = True\n\n if self.is_loop is False:\n # Get the First URL\n self.m_url = self.music_queue[0][0]['source']\n self.curr_title = self.music_queue[0][0]['title']\n\n # Remove the first element as you are currently playing it\n self.music_queue.pop(0)\n\n self.vc.play(discord.FFmpegPCMAudio(\n source=self.m_url, **self.FFMPEG_OPTIONS), after=lambda e: self.play_next())\n elif len(self.music_queue) == 0 and self.is_loop is True:\n self.is_playing = True\n\n self.vc.play(discord.FFmpegPCMAudio(\n source=self.m_url, **self.FFMPEG_OPTIONS), after=lambda e: self.play_next())\n else:\n self.is_playing = False\n\n # Infinite loop checking\n async def play_music(self, ctx):\n if len(self.music_queue) > 0 and self.is_loop is False:\n self.is_playing = True\n self.m_url = self.music_queue[0][0]['source']\n self.curr_title = self.music_queue[0][0]['title']\n # the queue of songs contains a sub-queue(array) of len 2, containing the obj and the voice channel\n\n # trying to connect the bot to channel is the bot is not already connected to the voice channel\n # trying to call the bot to the specific voice channel (currently where users are)\n if self.vc is None or not self.vc.is_connected():\n self.vc = await self.music_queue[0][1].connect()\n\n # if the bot fails to connect to the respective vc\n if self.vc is None:\n await ctx.send(\"The bot could not connect to the voice channel\")\n return\n else:\n await self.vc.move_to(self.music_queue[0][1])\n\n # remove the first element 
as you are currently playing it\n self.music_queue.pop(0)\n self.vc.play(discord.FFmpegPCMAudio(\n source=self.m_url, **self.FFMPEG_OPTIONS), after=lambda e: self.play_next())\n # print(type(self.m_url))\n else:\n self.is_playing = False\n\n def is_present(self, ctx):\n if ctx.author.voice is None:\n return True\n else:\n return False\n\n @commands.command(name=\"join\", aliases=[\"j\"], help=\"The bot joins the voice channel\")\n async def join(self, ctx):\n if self.is_present(ctx) is True:\n await ctx.send(\"You're not connected to the voice channel to use these commands\")\n else:\n voice_channel = ctx.author.voice.channel\n if ctx.voice_client is None:\n self.vc = await voice_channel.connect()\n else:\n await self.vc.move_to(voice_channel)\n\n @commands.command(name=\"play\", aliases=[\"p\"], help=\"Plays a selected song from youtube\")\n async def play(self, ctx, *args):\n if self.is_present(ctx) is True:\n await ctx.send(\"You're not connected to the voice channel to use these commands\")\n else:\n query = \" \".join(args)\n\n voice_channel = ctx.author.voice.channel\n\n if voice_channel is None:\n # you need to be connected so that the bot knows where to go\n await ctx.send(\"Connect to a voice channel yar kaisa horay\")\n elif self.is_paused and len(args) == 0:\n self.vc.resume()\n else:\n song = self.search_yt(query)\n if type(song) is type(True):\n await ctx.send(\n \"Could not get the song. Incorrect format try another keyword. This could be due to playlist or a livestream format\")\n else:\n await ctx.send(song['title'])\n self.music_queue.append([song, voice_channel])\n\n if self.is_playing is False:\n await self.play_music(ctx)\n\n @commands.command(name=\"pause\", aliases=[\"ps\"], help=\"Pauses the current song being played\")\n async def pause(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n # if self.is_playing:\n self.is_playing = False\n self.is_paused = True\n self.vc.pause()\n # elif self.is_paused:\n # self.vc.resume()\n\n @commands.command(name=\"resume\", aliases=[\"r\"], help=\"Resumes playing with the discord bot\")\n async def resume(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n if self.is_paused:\n self.vc.resume()\n\n @commands.command(name=\"current\", aliases=[\"curr\"], help=\"Gets the current title of the song that is playing\")\n async def current(self, ctx):\n if self.is_present(ctx) is True:\n await ctx.send(\"You're not connected to the vice channel to use these commands\")\n else:\n if self.curr_title == \"\":\n await ctx.send(\"There are no tracks playing at the moment\")\n else:\n await ctx.send(\"Currently Playing: \" + self.curr_title)\n\n @commands.command(name=\"lyrics\", help=\"Gets the lyrics of the given or current song\")\n async def lyrics(self, ctx, name: t.Optional[str]):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n name = name or self.curr_title\n\n extract_lyrics = SongLyrics(os.getenv(\"LYRICS_API_KEY\"), os.getenv(\"GCS_ENGINE_ID\"))\n\n res = extract_lyrics.get_lyrics(name)\n\n embed = discord.Embed(\n title=res['title'],\n description=res['lyrics'],\n colour=ctx.author.color,\n timestamp=dt.datetime.utcnow()\n )\n await ctx.send(embed=embed)\n\n @commands.command(name=\"skip\", aliases=[\"s\"], help=\"Skips the current song begin played\")\n async def skip(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to 
any voice channel!!!\")\n else:\n if self.vc is not None and self.vc:\n self.vc.stop()\n # Try to play next in the queue if it exists\n await self.play_music(ctx)\n\n @commands.command(name=\"remove\", help=\"Removes the first track \")\n async def remove(self, ctx, position: t.Optional[int]):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n position = position or len(self.music_queue)\n self.music_queue.pop(position - 1)\n await ctx.send(\"Track at position {} removed!!\".format(position))\n\n\n @commands.command(name=\"queue\", aliases=[\"q\"], help=\"Displays the current songs in queue\")\n async def queue(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n retval = \"Songs in the queue are:\\n\"\n for i in range(0, len(self.music_queue)):\n # Display all songs\n retval += \"{}) {}\".format((i + 1), self.music_queue[i][0]['title']) + \"\\n\"\n\n if len(self.music_queue) != 0:\n await ctx.send(retval)\n else:\n await ctx.send(\"No music in queue\")\n\n @commands.command(name=\"loop\", help=\"Loops the currently playing track\")\n async def loop(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n self.is_loop = True\n await ctx.send(\"Looping the current song!\")\n\n @commands.command(name=\"loop_off\", aliases=[\"lo\"], help=\"Loops the currently playing track\")\n async def loopOff(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n self.is_loop = False\n await ctx.send(\"Looping disabled!\")\n\n @commands.command(name=\"clear\", aliases=[\"c\", \"bin\"], help=\"Stops the music and clears the queue\")\n async def clear(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n if self.vc != None and self.is_playing:\n self.vc.stop()\n self.music_queue = []\n self.m_url = \"\"\n self.curr_title = \"\"\n await ctx.send(\"Music queue cleared\")\n\n @commands.command(name=\"stop\", aliases=[\"st\"], help=\"Stops the music and clears the queue\")\n async def stop(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n if self.vc != None and self.is_playing:\n self.vc.stop()\n self.music_queue = []\n self.m_url = \"\"\n self.curr_title = \"\"\n\n @commands.command(name=\"dc\", aliases=[\"disconnect\", \"l\", \"d\", \"leave\"], help=\"Kick the bot from the VC\")\n async def dc(self, ctx):\n if self.is_present(ctx):\n await ctx.send(\"You are not connected to any voice channel!!!\")\n else:\n self.is_playing = False\n self.is_paused = False\n await self.vc.disconnect()\n","repo_name":"Nis7538/Discord-Music-Bot","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":10667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20300236108","text":"from contextlib import contextmanager\nfrom typing import Generator\n\nfrom dualsense_controller.api.DualSenseController import DualSenseController, Mapping\nfrom dualsense_controller.api.enum import UpdateLevel\nfrom dualsense_controller.core.hidapi import DeviceInfo\nfrom dualsense_controller.core.state.typedef import Number\n\n\n@contextmanager\ndef active_dualsense_controller(\n # CORE\n device_index_or_device_info: int | DeviceInfo = 0,\n left_joystick_deadzone: Number = 0.05,\n right_joystick_deadzone: 
Number = 0.05,\n left_trigger_deadzone: Number = 0,\n right_trigger_deadzone: Number = 0,\n gyroscope_threshold: int = 0,\n accelerometer_threshold: int = 0,\n orientation_threshold: int = 0,\n mapping: Mapping = Mapping.NORMALIZED,\n update_level: UpdateLevel = UpdateLevel.DEFAULT,\n # OPTS\n microphone_initially_muted: bool = True,\n microphone_invert_led: bool = False,\n) -> Generator[DualSenseController, None, None]:\n controller: DualSenseController = DualSenseController(\n device_index_or_device_info,\n left_joystick_deadzone=left_joystick_deadzone,\n right_joystick_deadzone=right_joystick_deadzone,\n left_trigger_deadzone=left_trigger_deadzone,\n right_trigger_deadzone=right_trigger_deadzone,\n gyroscope_threshold=gyroscope_threshold,\n accelerometer_threshold=accelerometer_threshold,\n orientation_threshold=orientation_threshold,\n mapping=mapping,\n update_level=update_level,\n microphone_initially_muted=microphone_initially_muted,\n microphone_invert_led=microphone_invert_led,\n )\n controller.activate()\n try:\n yield controller\n finally:\n controller.deactivate()\n","repo_name":"yesbotics/dualsense-controller-python","sub_path":"src/dualsense_controller/api/contextmanager.py","file_name":"contextmanager.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"12560075742","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom django.conf import settings\nimport json\nfrom vk_api.keyboard import VkKeyboard\n\nfrom bot_logic.vk_bot_logic import send_message, button_response\nfrom products.views import get_category_dict\n\n\n@csrf_exempt\ndef index(request):\n\tif request.method == \"POST\":\n\t\tsection_dict = get_category_dict()\n\t\tdata = json.loads(request.body.decode('utf-8'))\n\t\tmessage_data = data['object']['message']\n\t\tkeyboard = VkKeyboard(one_time=False, inline=True)\n\t\tfor elem, value in section_dict.items():\n\t\t\tkeyboard.add_button(elem)\n\t\tmessage_data['keyboard'] = keyboard.get_keyboard()\n\t\tclean_text = message_data['text']\n\t\tif data['type'] == 'confirmation': # if VK server request confirmation\n\t\t\treturn settings.VK_GET_KEY\n\n\t\telif data['type'] == 'message_new':\n\n\t\t\tif clean_text in section_dict:\n\t\t\t\tcategory_id = section_dict[clean_text]\n\t\t\t\tsend_message(\n\t\t\t\t\tmessage=f'Запрос принят. Минуточку... Сейчас обрабатывается запрос {clean_text}: id={category_id}',\n\t\t\t\t\tevent=message_data, keyboard=False\n\t\t\t\t)\n\t\t\t\tfor i in button_response(category_id):\n\t\t\t\t\tsend_message(message=i['message'], event=message_data, attachment=i['attachment'], keyboard='None')\n\n\t\t\t\tsend_message(message='Продолжим...', event=message_data)\n\n\t\t\t\treturn HttpResponse('ok', content_type=\"text/plain\", status=200)\n\t\t\telse:\n\t\t\t\tsend_message(event=message_data, message=f'Ответ на простое сообщение, не на кнопку... 
\\n {clean_text}')\n\t\t\t\treturn HttpResponse('ok', content_type=\"text/plain\", status=200)\n\telse:\n\t\treturn HttpResponse('see you :)')\n","repo_name":"dronpavlove/vk_django_bot","sub_path":"vk_bot/bot_logic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6914247170","text":"\"\"\"\nLeetCode :: Partition List\njramaswami\n\"\"\"\nfrom leetcode_linked_lists import *\n\n\nclass Solution:\n def partition(self, head: ListNode, x: int) -> ListNode:\n less_head = less_tail = None\n more_head = more_tail = None\n\n curr_node = head\n while curr_node:\n if curr_node.val < x:\n if not less_head:\n less_head = curr_node\n if less_tail:\n less_tail.next = curr_node\n less_tail = curr_node\n else:\n if not more_head:\n more_head = curr_node\n if more_tail:\n more_tail.next = curr_node\n more_tail = curr_node\n curr_node = curr_node.next\n\n if more_tail:\n more_tail.next = None\n if less_head:\n less_tail.next = more_head\n return less_head\n else:\n return more_head\n \n\ndef test_1():\n head = [1,4,3,2,5,2]\n x = 3\n expected = [1,2,2,4,3,5]\n result = Solution().partition(make_list(head), x)\n assert make_arr(result) == expected\n\n\ndef test_2():\n head = [2, 1]\n x = 2\n expected = [1,2]\n result = Solution().partition(make_list(head), x)\n assert make_arr(result) == expected\n\n\ndef test_3():\n head = [5, 4, 3, 2]\n x = 1\n expected = list(head)\n result = Solution().partition(make_list(head), x)\n assert make_arr(result) == expected\n","repo_name":"jramaswami/LeetCode_Python","sub_path":"partition_list.py","file_name":"partition_list.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"44266188546","text":"# Ex_15650 N과 M(2) [실3]\ndef dfs(depth):\n if depth == M:\n print(*ans[1:])\n\n elif depth < M:\n for i in range(N):\n if not v[i] and ans[-1] < A[i]: # 오름차순만 인정\n v[i] = True\n ans.append(A[i])\n dfs(depth + 1)\n v[ans.pop() - 1] = False # 이미 출력한 원소를 되추적하며 방문 취소\n\n\nN, M = map(int, input().split())\nA = [i + 1 for i in range(N)]\nv = [False] * N\nans = [0]\ndfs(0)\n","repo_name":"Cha-Ji/Algorithm","sub_path":"Backjoon/old/21_04_str_backT/21_04_02_backtrack/15650.py","file_name":"15650.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15212870353","text":"class Solution:\n def reverseWords(self, s: str) -> str:\n s = s.split()\n for i in range(len(s)):\n word = list(s[i])\n word.reverse()\n word = \"\".join(word)\n s[i] = word\n \n return \" \".join(s)","repo_name":"bereket2sh/competitive_programming","sub_path":"557-reverse-words-in-a-string-iii/557-reverse-words-in-a-string-iii.py","file_name":"557-reverse-words-in-a-string-iii.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"43440199835","text":"from __future__ import annotations\n\nimport sys\n\nif sys.version_info >= (3, 8): # pragma: no cover\n from functools import singledispatchmethod as _singledispatchmethod\n\n class singledispatchmethod(_singledispatchmethod):\n def register(self, cls, method=None):\n \"\"\"generic_method.register(cls, func) -> func\n\n Registers a new implementation for the given *cls* on a *generic_method*.\n \"\"\"\n if isinstance(cls, (classmethod, 
staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)\n\nelse:\n from functools import singledispatch, update_wrapper\n\n class singledispatchmethod:\n \"\"\"Single-dispatch generic method descriptor.\n\n Supports wrapping existing descriptors and handles non-descriptor\n callables as instance methods.\n \"\"\"\n\n def __init__(self, func):\n if not callable(func) and not hasattr(func, \"__get__\"):\n raise TypeError(f\"{func!r} is not callable or a descriptor\")\n\n self.dispatcher = singledispatch(func)\n self.func = func\n\n def register(self, cls, method=None):\n \"\"\"generic_method.register(cls, func) -> func\n\n Registers a new implementation for the given *cls* on a *generic_method*.\n \"\"\"\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)\n\n def __get__(self, obj, cls=None):\n def _method(*args, **kwargs):\n method = self.dispatcher.dispatch(args[0].__class__)\n return method.__get__(obj, cls)(*args, **kwargs)\n\n _method.__isabstractmethod__ = self.__isabstractmethod__\n _method.register = self.register\n update_wrapper(_method, self.func)\n return _method\n\n @property\n def __isabstractmethod__(self):\n return getattr(self.func, \"__isabstractmethod__\", False)\n","repo_name":"pyeventsourcing/eventsourcing","sub_path":"eventsourcing/dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":1331,"dataset":"github-code","pt":"2"} +{"seq_id":"8360740952","text":"\"\"\"Create and implements the commands\"\"\"\nfrom Framework.Constants import StatusConst\nfrom Framework.Local import Local\n\n__author__ = 'Thadeu Jose'\n\n\nclass Command:\n \"\"\"Base class of all commands\"\"\"\n def __init__(self, local, controller, framework):\n self.local = local\n self._controller = controller\n self.framework = framework\n\n def __call__(self, args):\n return self.function(args)\n\n def function(self, args):\n \"\"\"Define what the command will do\"\"\"\n pass\n\n\nclass Go(Command):\n \"\"\"Command you use to walk in the history\"\"\"\n def function(self, args):\n local = self.local.getlocal(args[0])\n if isinstance(local, Local):\n self._controller.currentlocal = local\n return local.__str__()\n return \"You cant go in this direction\"\n\n\nclass Get(Command):\n \"\"\"Command you use to pick a item\"\"\"\n def function(self, args):\n inventory = self.local.getstatus(StatusConst.INVENTORY)\n itemname = \" \".join(args)\n if not inventory:\n return \"There is nothing to get here\"\n if itemname not in inventory:\n return \"There is no item call \" + itemname.capitalize()\n item = inventory.take(itemname)\n cancollect = self._collectable(item, StatusConst.COLLECTABLE) and self._collectable(item, StatusConst.VISIBLE)\n if cancollect:\n self._controller.additem(item)\n return \"You sucessful get \" + itemname.capitalize()\n inventory.add(item)\n return \"You cant get the item\"\n\n def _collectable(self, item, idstatus):\n return getstatus(item, idstatus) if hasstatus(item, idstatus) else True\n\n\n\n\n # TODO Get more then one item\n # REGEX see if has a number not follow for nothing\n\n\n''' if len(args == :\n try:\n if args[0] in inventory:\n 
self.controller.setitem(inventory.take(args[0 , nt(args[1].strip())))\n return \"You sucessful get \"+args[0]\n return \"You cant get the item\"\n except Exception as e:\n print(e)\n return \"You cant get the item\"'''\n\n\n#Revisar\n#-----------------------------------------------------------------------------------------------------------------------\n\nclass Inv(Command):\n def function(self, args):\n if self._controller.quantitem() == 0:\n return \"You have no item\"\n return \"You have \" + str(self._controller.inventory())\n\n\nclass See(Command):\n \"\"\"Command you use to see in detail something\"\"\"\n def function(self, args):\n inv = getstatus(self.local, StatusConst.INVENTORY)\n if not args:\n result = list()\n for elem in inv:\n result.append(str(elem))\n if not result:\n return \"You see nothing\"\n return \"You see \"+\",\".join(result)\n if args:\n itemname = \" \".join(args)\n if itemname in inv:\n item = inv.take(itemname)\n inv.add(item)\n return str(item)\n return \"There nothing to see here\"\n\n def _visible(self, item, idstatus):\n return getstatus(item, idstatus) if hasstatus(item, idstatus) else True\n\nclass Open(Command):\n \"\"\"Command you use to open a container\"\"\"\n def function(self, args):\n if not args:\n return \"You have to give the name of the item you want to open\"\n inv = getstatus(self.local, StatusConst.INVENTORY)\n itemname = \" \".join(args)\n if itemname not in inv:\n return \"There is no item call \" + itemname.capitalize()\n for elem in inv:\n item = elem.item\n if hasstatus(item, StatusConst.INSIDE):\n containername = getstatus(item, StatusConst.INSIDE)\n if itemname.lower() == containername.lower():\n setstatus(item, StatusConst.VISIBLE, True)\n return \"You open \" + containername.capitalize()","repo_name":"ThadeuJose/FrameworkTextAdventure2","sub_path":"Framework/Commands.py","file_name":"Commands.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17877371843","text":"from multiprocessing.dummy import Pool\nfrom typing import Any, Callable, Iterable\n\nfrom .istarmap import istarmap as _ # noqa\n\n\ndef execute_parallel(\n func: Callable[..., Any],\n args: Iterable[tuple[Any, ...]],\n):\n pool = Pool()\n results = pool.istarmap(func, args)\n for result in results:\n yield result\n","repo_name":"code-yeongyu/twitter_video_tools_v2","sub_path":"twtvt/utils/execute_parallel.py","file_name":"execute_parallel.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"} +{"seq_id":"41162740836","text":"from flask import Flask,request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/test',methods=['POST'])\ndef test_data():\n name = request.get_json()\n print('hello from flask',name)\n return\"hello\"\n\n@app.route('/train',methods=['POST'])\ndef train_data():\n name = request.get_json()\n print('hello from flask',name['x_'])\n return\"hello\"\n \nif __name__ == \"__main__\":\n app.run(host=\"localhost\",port=5000,debug=True)","repo_name":"kaviya15/veena","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"988392456","text":"import web_crawler as wc\nimport process_data as pcd\nimport pandas as pd\nimport pickle\n\ninitial_list = pd.read_csv('zip_codes.csv', header=None)\ncodes = [val for val 
in initial_list[0]][:10]\ncodes = [95070, 95129]\n\nsoups = {}\n\n\ndriver = wc.init_driver('./chromedriver')\nwc.navigate_to_page(driver, 'https://www.realtor.com/')\nfor code in codes:\n soup = wc.navigate_to_code(driver, code)\n soups.update(soup)\nhomes_list = pcd.process_soups(soups, codes)\ndf = pcd.create_df(homes_list)\npcd.create_csv(df)\n\nwc.terminate_driver(driver)\n","repo_name":"amarbhatia95/real_estate_scraper_public","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1174629145","text":"import datetime\nimport sys\n\nimport psycopg2\nimport pytz\n\nsts = datetime.datetime(2010, int(sys.argv[1]), 1)\nets = sts + datetime.timedelta(days=35)\nets = ets.replace(day=1)\n\nPGCONN = psycopg2.connect(database=\"mec\", host=\"127.0.0.1\", port=\"5555\")\ncursor = PGCONN.cursor()\ncursor2 = PGCONN.cursor()\n\ncursor.execute(\n \"\"\"SELECT id, unitnumber, ST_x(geom), ST_y(geom)\n from turbines\"\"\"\n)\n\no = open(\"turbines.csv\", \"w\")\no.write(\"TID,LON,LAT\\n\")\n\np = open(\"turbine_data_%s.csv\" % (sts.strftime(\"%Y%m\"),), \"w\")\np.write(\"TID,VALID_UTC,VALID_LOCAL,POWER,YAW,YAW2,WINDSPEED,PITCH\\n\")\n\nfor row in cursor:\n o.write(\"%s,%.6f,%.6f\\n\" % (row[0], row[2], row[3]))\n\n cursor2.execute(\n \"\"\"SELECT valid at time zone 'UTC', power, yaw, yaw2,\n windspeed, pitch from sampled_data_%s WHERE\n valid between '%s' and '%s'\n ORDER by valid ASC \"\"\"\n % (row[1], sts.strftime(\"%Y-%m-%d\"), ets.strftime(\"%Y-%m-%d\"))\n )\n\n for row2 in cursor2:\n ts = row2[0]\n ts = ts.replace(tzinfo=pytz.timezone(\"UTC\"))\n p.write(\n (\"%s,%s,%s,%s,%s,%s,%s,%s\\n\")\n % (\n row[0],\n ts.strftime(\"%Y-%m-%d %H:%M:%S\"),\n (ts.astimezone(pytz.timezone(\"America/Chicago\"))).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n row2[1],\n row2[2],\n row2[3],\n row2[4],\n row2[5],\n )\n )\n\np.close()\no.close()\n","repo_name":"akrherz/DEV","sub_path":"mec/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"71550057006","text":"import logging\nimport socket\nimport ssl\n\n# App\nimport d1_onedrive.impl.disk_cache\nimport d1_onedrive.impl.onedrive_exceptions\n\nimport d1_common\nimport d1_common.types.dataoneTypes\nimport d1_common.types.exceptions\n\nimport d1_client.cnclient\nimport d1_client.d1client\n\n# Set up logger for this module.\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\nclass DataONEClient:\n def __init__(self, options):\n self._options = options\n self._science_object_cache = d1_onedrive.impl.disk_cache.DiskCache(\n options.sci_obj_max_cache_items, options.sci_obj_cache_path\n )\n self._system_metadata_cache = d1_onedrive.impl.disk_cache.DiskCache(\n options.sys_meta_max_cache_items, options.sys_meta_cache_path\n )\n\n self.client = d1_client.cnclient.CoordinatingNodeClient(\n base_url=self._options.base_url\n )\n self.query_engine_description = None\n self.all_facet_names = None\n\n def get_science_object(self, pid):\n return self._get_science_object_through_cache(pid)\n\n def get_system_metadata(self, pid):\n \"\"\"This method causes an implicit validation of the retrieved System\n Metadata.\"\"\"\n return self._get_system_metadata(pid)\n\n def get_system_metadata_as_string(self, pid):\n \"\"\"This method does not include validation of the System Metadata.\"\"\"\n return 
self._get_system_metadata_as_string_through_cache(pid)\n\n def describe(self, pid):\n try:\n return self.client.describe(pid)\n except d1_common.types.exceptions.DataONEException as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(e.description)\n except (ssl.SSLError, socket.error) as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(str(e))\n\n #\n # Private.\n #\n\n # Science object.\n\n def _get_science_object_through_cache(self, pid):\n try:\n return self._science_object_cache[pid]\n except KeyError:\n pass\n self._science_object_cache._delete_oldest_file_if_full()\n science_object = self._get_science_object(pid)\n self._science_object_cache[pid] = science_object\n return science_object\n\n def _get_science_object(self, pid):\n try:\n d1client = d1_client.d1client.DataONEClient(\n cnBaseUrl=self._options.base_url\n )\n return d1client.get(pid).read()\n except d1_common.types.exceptions.DataONEException as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(e.description)\n except (ssl.SSLError, socket.error) as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(str(e))\n\n # System Metadata as PyXB object.\n\n def _get_system_metadata(self, pid):\n return d1_common.types.dataoneTypes.CreateFromDocument(\n self._get_system_metadata_as_string_through_cache(pid)\n )\n\n # System Metadata as string.\n\n def _get_system_metadata_through_cache(self, pid):\n return d1_common.types.dataoneTypes.CreateFromDocument(\n self._get_system_metadata_as_string_through_cache(pid)\n )\n\n def _get_system_metadata_as_string_through_cache(self, pid):\n try:\n return self._system_metadata_cache[pid]\n except KeyError:\n pass\n self._system_metadata_cache._delete_oldest_file_if_full()\n sys_meta_str = self._get_system_metadata_as_string(pid)\n self._system_metadata_cache[pid] = sys_meta_str\n return sys_meta_str\n\n def _get_system_metadata_as_string(self, pid):\n try:\n result = self.client.getSystemMetadataResponse(pid)\n return result.read()\n except d1_common.types.exceptions.DataONEException as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(e.description)\n except (ssl.SSLError, socket.error) as e:\n raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(str(e))\n","repo_name":"DataONEorg/d1_python","sub_path":"client_onedrive/src/d1_onedrive/impl/clients/onedrive_d1_client.py","file_name":"onedrive_d1_client.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"2"} +{"seq_id":"23495482668","text":"# try :\n# 예외 발생 가능 실행문\n# 정상 흐름 구현\n# except :\n# 예외 발생시 실행문\n# finally :\n# 예외 발생여부 상관없이 무조건 실행되는 실행문\n\n# Multi Exception 처리 : SubClass 타입부터 처리.\n\nlist_data = [1,2,3,4,5]\ndivide_sum = 0\ntry :\n r = int(input(\"반지름 ? \")) # ValueError - 문자가 입력될 경우.\n # for data in list_data : # 0 ~ len(list_data):인덱스 값과 같음.\n # print(data)\n for index in range(len(list_data)+1) : # len(list_data)는 인덱스와 같아서 오류가 발생하지 않음.\n divide_sum += list_data[index] / r # ZeroDivisionError - r이 0일 경우.\n print(\"{0}번째 데이터 : {1}\".format(index,list_data[index])) # IndexError - len(list_data)+1 이므로.\n\n print(\"원의 반지름 : \",r)\n print(\"원 둘례 : \",2*3.14*r)\n print(\"원 넓이 : \",3.14*r*r)\n\nexcept ValueError:\n print(\"ValueError : 숫자 입력\")\nexcept IndexError :\n print(\"IndexError : 리스트 데이터는 0~{0}까지 접근 가능\".format(len(list_data)))\nexcept Exception as error : \n print(error,\"프로그램 비정상 종료.\")\n\nfinally :\n print(\"예외 여부 상관없이 출력.\")\n\n\n# 사용자 정의 예외 - 클래스\n# 1. 
Exception을 상속받는 사용자정의예외클래스 정의\n# 2. 함수 예외 상황이 발생됐을 때 raise 이용하여 강제로 예외 발생\n# 3. 호출하는 쪽에서 처리\n# try :\n# 사용자 정의 예외 발생하는 함수 호출\n# except 사용자정의예외클래스 타입 : \n# 예외 처리 실행문\n# finally :\n# 무조건 실행되는 실행문\n\n# class UserException(Exception): \n # def __init__(self):\n # Exception.__init__(self) ??????????\n\n # def __str__(self):\n # return \"사용자 정의 오류 발생\"\n\nclass UserException(Exception):\n def __init__(self,message):\n super().__init__(message)\n\nraise UserException(\"사용자 정의 오류 발생\")","repo_name":"happyAyun/MSA_TIL","sub_path":"python_workspace/basic_grammer/12_exception_test.py","file_name":"12_exception_test.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"1115505713","text":"\"\"\"\r\nÖnce Nasıl Input aldğımızı hatılayalım.\r\n\"\"\"\r\n\r\n# yas = int(input(\"Yaşınızı girin: \"))\r\n\r\n\"\"\"\r\nyas = int(input(\"Yaşınızı girin: \"))\r\n\r\nif (yas>= 18):\r\n print(\"Reşitsin.\")\r\nelif yas>=16:\r\n print(\"16 yaşından büyüksün.\")\r\nelse:\r\n print(\"sağlana durumlara uygun değilsin\")\r\n\"\"\"\r\n\r\n\"\"\"\r\na = [1 ,3 ,4]\r\na[1]\r\nfor i in range(5):\r\n eksi = \"*\" * i*2\r\n print(eksi.center(15))\r\n\"\"\"\r\n\"\"\"\r\ni = 100\r\nwhile i > 5:\r\n print(\"xxxxxx\")\r\n i = i /5\r\nyas = int(input(\"Yaşınızı girin: \"))\r\n\r\ntablo[0][0] +=1\r\nprint(\"test\",end = \" \")\r\n\"\"\"\r\ntablo = [[0,0,0],[0,0,0],[0,0,0]]\r\ncevap = \"evet\"\r\nwhile cevap == \"evet\":\r\n a = int(input(\"birinci değer \"))\r\n b = int(input(\"ikinci değer \"))\r\n tablo[a][b] +=1\r\n for i in range(len(tablo)):\r\n for j in range(len(tablo[0])):\r\n print(tablo[i][j],end = \" \")\r\n print()\r\n cevap = input( \"Devam etsin mi? \")","repo_name":"Emirhantaze/Python-dersleri-IEEE-AGU","sub_path":"2.hafta/2022-dersinde-islenenler.py","file_name":"2022-dersinde-islenenler.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"29283959170","text":"from __future__ import division\nimport numpy as np\nfrom .cached_log import LogComputer, LogGammaComputer\n\nclass ScorerFactory:\n def __init__(self, alpha, beta):\n assert alpha >= 0\n assert beta >= 0\n if isinstance(alpha, float) and alpha.is_integer():\n self.alpha = int(alpha)\n else:\n self.alpha = alpha\n self.beta = beta\n self.log_gamma_computer = LogGammaComputer()\n self.log_gamma_alpha_computer = LogGammaComputer(shift=alpha)\n self.log_computer = LogComputer(shift = beta)\n\n def __call__(self, counts, split_candidates):\n if isinstance(self.alpha, int):\n return LogMarginalLikelyhoodIntAlphaComputer(counts, self.alpha, self.beta, split_candidates,\n log_computer=self.log_computer,\n log_gamma_computer=self.log_gamma_computer,\n log_gamma_alpha_computer=self.log_gamma_alpha_computer)\n else:\n return LogMarginalLikelyhoodRealAlphaComputer(counts, self.alpha, self.beta, split_candidates,\n log_computer=self.log_computer,\n log_gamma_computer=self.log_gamma_computer,\n log_gamma_alpha_computer=self.log_gamma_alpha_computer)\n\ndef assert_correct_counts(counts):\n assert isinstance(counts, np.ndarray)\n assert counts.dtype == int\n assert np.all(counts >= 0)\n assert len(counts) > 0\n\ndef assert_correct_split_candidates(split_candidates, counts):\n assert isinstance(split_candidates, np.ndarray)\n assert split_candidates[0] == 0\n assert split_candidates[-1] == len(counts)\n assert np.all(split_candidates[1:] > split_candidates[:-1]) # strictly 
ascending\n\n# Indexing of LogMarginalLikelyhoodComputer iterates over split candidates, not counts\n# LogMarginalLikelyhoodComputer is a base class. It's likely that you need one of its subclasses:\n# either LogMarginalLikelyhoodIntAlphaComputer or LogMarginalLikelyhoodRealAlphaComputer\nclass LogMarginalLikelyhoodComputer:\n def __init__(self, counts, alpha, beta, split_candidates, log_computer=None, log_gamma_computer=None, log_gamma_alpha_computer=None):\n self.alpha = alpha\n\n self.log_computer = log_computer if log_computer else LogComputer(shift=beta)\n self.log_gamma_computer = log_gamma_computer if log_gamma_computer else LogGammaComputer()\n self.log_gamma_alpha_computer = log_gamma_alpha_computer if log_gamma_alpha_computer else LogGammaComputer(shift=alpha)\n\n assert_correct_counts(counts)\n assert_correct_split_candidates(split_candidates, counts)\n self.split_candidates = split_candidates\n\n self.cumsum = np.hstack([0, np.cumsum(counts)])[split_candidates]\n\n count_logfacs = self.log_gamma_computer.compute_for_array_unbound(counts + 1)\n self.logfac_cumsum = np.hstack([0, np.cumsum(count_logfacs)])[split_candidates]\n\n self.segment_creation_cost = alpha * self.log_computer.compute_for_number(0) - self.log_gamma_alpha_computer.compute_for_number(0)\n\n def total_sum_logfac(self):\n return self.logfac_cumsum[-1]\n\n def scores(self):\n segment_lengths = np.diff(self.split_candidates)\n segment_counts = np.diff(self.cumsum)\n shifted_segment_counts = segment_counts + self.alpha\n add = self.log_gamma_alpha_computer.compute_for_array_unbound(segment_counts)\n sub = shifted_segment_counts * self.log_computer.compute_for_array_unbound(segment_lengths)\n self_scores = add - sub\n return self_scores + self.segment_creation_cost\n\n def log_marginal_likelyhoods(self):\n segment_sum_logfacs = np.diff(self.logfac_cumsum)\n return self.scores() - segment_sum_logfacs\n\n def mean_counts(self):\n segment_lengths = np.diff(self.split_candidates)\n segment_counts = np.diff(self.cumsum)\n return segment_counts / segment_lengths\n\n def score(self, start, stop):\n return self.self_score(start, stop) + self.segment_creation_cost\n\n def self_score(self, start, stop):\n segment_count = self.cumsum[stop] - self.cumsum[start]\n shifted_segment_count = segment_count + self.alpha\n segment_length = self.split_candidates[stop] - self.split_candidates[start]\n add = self.log_gamma_alpha_computer.compute_for_number(segment_count)\n sub = shifted_segment_count * self.log_computer.compute_for_number(segment_length)\n return add - sub\n\n def self_score_no_splits(self):\n return self.self_score(0, len(self.split_candidates) - 1)\n def score_no_splits(self):\n return self.self_score_no_splits() + self.segment_creation_cost\n\nclass LogMarginalLikelyhoodIntAlphaComputer(LogMarginalLikelyhoodComputer):\n # marginal likelihoods for segments [i, stop) for all i < stop.\n # [i, stop) means that segment boundaries are ... i - 1][i ...... 
stop - 1][stop ...\n # These scores are not corrected for constant penalty for segment creation\n def all_suffixes_self_score(self, stop):\n # segment_count + alpha\n # it's more efficient to add up numbers, then add result to vector\n # (alternative is to add numbers to a vector one-by-one)\n shifted_segment_count_vec = (self.alpha + self.cumsum[stop]) - self.cumsum[0:stop]\n\n segment_length_vec = self.split_candidates[stop] - self.split_candidates[:stop]\n\n add_vec = self.log_gamma_computer.compute_for_array(shifted_segment_count_vec, max_value=(self.alpha + self.cumsum[stop]))\n sub_vec = shifted_segment_count_vec * self.log_computer.compute_for_array(segment_length_vec, max_value=self.split_candidates[stop])\n return add_vec - sub_vec\n\nclass LogMarginalLikelyhoodRealAlphaComputer(LogMarginalLikelyhoodComputer):\n # marginal likelihoods for segments [i, stop) for all i < stop.\n # [i, stop) means that segment boundaries are ... i - 1][i ...... stop - 1][stop ...\n # These scores are not corrected for constant penalty for segment creation\n def all_suffixes_self_score(self, stop):\n segment_count_vec = self.cumsum[stop] - self.cumsum[0:stop]\n # segment_count + alpha\n # it's more efficient to add up numbers, then add result to vector\n # (alternative is to add numbers to a vector one-by-one)\n shifted_segment_count_vec = (self.alpha + self.cumsum[stop]) - self.cumsum[0:stop]\n\n segment_length_vec = self.split_candidates[stop] - self.split_candidates[:stop]\n\n add_vec = self.log_gamma_alpha_computer.compute_for_array(segment_count_vec, max_value=self.cumsum[stop])\n sub_vec = shifted_segment_count_vec * self.log_computer.compute_for_array(segment_length_vec, max_value=self.split_candidates[stop])\n return add_vec - sub_vec\n","repo_name":"autosome-ru/pasio","sub_path":"src/pasio/log_marginal_likelyhood.py","file_name":"log_marginal_likelyhood.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"19574632725","text":"\nimport os\n\nimport tensorflow as tf\nimport glob\nimport numpy as np\nimport time\nimport pandas as pd\nimport cv2\n\nfrom ConvNorm import ConvNorm\n\n\n#%%\nPATH = r'dataset'\n\n#%%\nBUFFER_SIZE = 50\nBATCH_SIZE = 5\nPATCH_SIZE = 256\n\n\ndef load_train(inf, trf, htf):\n C = np.array([[17, 141, 215],\n [225, 227, 155],\n [127, 173, 123],\n [185, 122, 87],\n [230, 200, 181],\n [150, 150, 150],\n [193, 190, 175]])\n C = np.reshape(C, (1,1,C.shape[0],3))\n \n input_image = tf.io.decode_png(tf.io.read_file(inf))\n real_image = tf.io.decode_png(tf.io.read_file(trf))\n height_image = tf.io.decode_png(tf.io.read_file(htf), dtype=tf.uint16)\n \n input_image = tf.cast(input_image, tf.float32)\n real_image = tf.cast(real_image, tf.float32)\n height_image = tf.cast(height_image, tf.float32)\n \n orig_image = tf.image.resize(real_image, [PATCH_SIZE, PATCH_SIZE], method=tf.image.ResizeMethod.BILINEAR, antialias=True) / 127.5 - 1\n orig_image.set_shape([None, None, 3])\n \n temp = tf.image.random_crop(tf.stack([input_image, real_image, tf.concat([height_image,tf.zeros_like(height_image),tf.zeros_like(height_image)], axis=2)], axis=0), size=[3, PATCH_SIZE, PATCH_SIZE, 3])\n input_image = temp[0]\n real_image = temp[1]\n height_image = tf.expand_dims(temp[2,:,:,0], -1)\n \n #patch2 = tf.image.random_crop(real_image, size=[PATCH_SIZE, PATCH_SIZE,3]) / 127.5 - 1\n \n input_image = tf.one_hot(tf.argmin(tf.norm(tf.expand_dims(input_image, -2)-C, axis=3), 2), C.shape[2], 
dtype=tf.float32)\n real_image = real_image / 127.5 - 1\n height_image = height_image / 32767.5 - 1\n \n return real_image, input_image, height_image, orig_image\n\n\n\ndef load_test(imf):\n im = tf.io.decode_png(tf.io.read_file(imf))\n \n H = tf.shape(im)[0]\n W = tf.shape(im)[1]\n h = (H-PATCH_SIZE)//2\n w = (W-PATCH_SIZE)//2\n \n im = im[h:h+PATCH_SIZE, w:w+PATCH_SIZE, :]\n im = tf.cast(im, tf.float32) / 127.5 - 1\n return im\n\n\n#%%\nfiles = glob.glob(os.path.join(PATH, '*_i2.png'))\ntfiles = [f.replace('_i2.png', '_t.png') for f in files]\nhfiles = [f.replace('_i2.png', '_h.png') for f in files]\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((files, tfiles, hfiles)).shuffle(len(files)).map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ntrain_dataset = train_dataset.batch(BATCH_SIZE)\n\ntest_dataset = tf.data.Dataset.from_tensor_slices(tfiles).map(load_test, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(BATCH_SIZE)\n\n\n#%%\ndef conv(x, channels, kernel=3, stride=1, pad=0, pad_type='symmetric', use_bias=True):\n if kernel>1:\n p = (kernel-1)//2\n x = tf.pad(x, [[0,0], [p,p], [p,p], [0,0]], mode='SYMMETRIC')\n x = tf.keras.layers.Conv2D(channels, kernel, strides=stride, padding='valid', kernel_initializer=tf.keras.initializers.GlorotUniform(), use_bias=use_bias, bias_initializer=tf.initializers.constant(0.0))(x)\n return x\n \n\ndef resblock(x_init, stride=1):\n channel_in = x_init.get_shape().as_list()[-1]\n n = 1\n if stride>1:\n n = 2\n \n x_init = tf.nn.leaky_relu(x_init, 0.2)\n #x = conv(x_init, channel_middle, stride=stride)\n x = ConvNorm(channel_in//2, kernel_size=1)(x_init)\n \n x = tf.nn.leaky_relu(x, 0.2)\n #x = conv(x, channel_middle)\n x = ConvNorm(channel_in//2, strides=stride)(x)\n \n x = tf.nn.leaky_relu(x, 0.2)\n #x = conv(x, channels)\n x = ConvNorm(channel_in*n, kernel_size=1)(x)\n \n if stride>1:\n #x_init = conv(x_init, channels, kernel=1, stride=stride)\n x_init = ConvNorm(channel_in*n, kernel_size=1, strides=stride)(x_init)\n \n return x + x_init\n\n\ndef regnet_block(x_init, channels, g, stride=1):\n channel_in = x_init.get_shape().as_list()[-1]\n \n x_init = tf.nn.leaky_relu(x_init, 0.2)\n #x_init = tf.nn.relu(x_init)\n x = ConvNorm(channels, kernel_size=1)(x_init)\n \n x = tf.nn.leaky_relu(x, 0.2)\n #x = tf.nn.relu(x)\n x = ConvNorm(channels, kernel_size=3, group_size=g, strides=stride)(x)\n \n x = tf.nn.leaky_relu(x, 0.2)\n #x = tf.nn.relu(x)\n x = ConvNorm(channels, kernel_size=1)(x)\n \n if channel_in!=channels:\n x_init = ConvNorm(channels, kernel_size=1, strides=stride)(x_init)\n \n return x + x_init\n \n \n\ndef regnetx(x, D, W, G):\n assert len(D)==len(W)\n \n for i,d in enumerate(D):\n for j in range(d):\n if j==0 and i>0:\n x = regnet_block(x, W[i], G, stride=2)\n else:\n x = regnet_block(x, W[i], G)\n \n return x\n\n\n\n\n#%%\nOUTPUT_CHANNELS = 64\nSTART_CHANNELS = 32\n\ndef FeatureExtractor(in_channels, blocks=5):\n x_init = tf.keras.layers.Input(shape=[None, None, in_channels])\n x = x_init\n \n for i in range(blocks):\n x = resblock(x, stride=2)\n x = resblock(x)\n \n return tf.keras.Model(inputs=x_init, outputs=x)\n\n\ndef FeatureExtractor_regnet(D, W, G):\n x_init = tf.keras.layers.Input(shape=[None, None, W[0]])\n x = regnetx(x_init, D, W, G)\n \n return tf.keras.Model(inputs=x_init, outputs=x)\n\n\ndef Classifier(in_channels, out_channels=128):\n x_init = tf.keras.layers.Input(shape=[None, None, in_channels])\n x = tf.math.reduce_mean(x_init, axis=(1,2))\n \n x = tf.nn.leaky_relu(x, 0.2)\n x = 
tf.keras.layers.Dense(units=in_channels)(x)\n \n x = tf.nn.leaky_relu(x, 0.2)\n x = tf.keras.layers.Dense(units=out_channels)(x)\n \n x = tf.nn.leaky_relu(x, 0.2)\n x = tf.keras.layers.Dense(units=out_channels)(x)\n \n return tf.keras.Model(inputs=x_init, outputs=x)\n\n\ndef Att_Classifier(in_channels, out_channels=128, h=3, n=64):\n x_init = tf.keras.layers.Input(shape=[None, None, in_channels])\n b = tf.shape(x_init)[0]\n \n x = tf.nn.leaky_relu(x_init, 0.2)\n x = ConvNorm(in_channels, kernel_size=1)(x)\n \n q = tf.keras.layers.Dense(units=h*n)(tf.nn.leaky_relu(tf.math.reduce_mean(x, axis=(1,2)), 0.2))\n q = tf.nn.relu(tf.reshape(q, (-1, h, n)))\n x = tf.nn.leaky_relu(x, 0.2)\n k = tf.reshape(conv(x, n, 1), (b, -1, n))\n v = tf.reshape(conv(x, n, 1), (b, -1, n))\n \n k = tf.nn.softmax(tf.matmul(q, tf.transpose(k, (0,2,1))) / tf.math.sqrt(tf.cast(n, tf.float32)))\n x = tf.reshape(tf.matmul(k, v), (-1, h*n))\n x = tf.keras.layers.Dense(units=out_channels)(x)\n \n return tf.keras.Model(inputs=x_init, outputs=x)\n\n\ndef Init_Layer(in_channels=3, channels=64):\n x_init = tf.keras.layers.Input(shape=[None, None, in_channels])\n x = conv(x_init, channels, kernel=5, stride=2)\n return tf.keras.Model(inputs=x_init, outputs=x)\n\n\n\nEncoder = FeatureExtractor(START_CHANNELS, blocks=5)\n\n# =============================================================================\n# ## RagNetX network structure using notation from paper.\n# #D = [2, 5, 14, 2]\n# #W = [80, 240, 560, 1360]\n# #G = 40\n# #D = [2, 6, 15, 2]\n# #W = [96, 192, 432, 1008]\n# #G = 48\n# D = [2, 4, 10, 2]\n# W = [72, 168, 408, 912]\n# G = 24\n# Encoder = FeatureExtractor_regnet(D, W, G)\n# =============================================================================\n\nFE_outchannels = Encoder.get_layer(index=-1).output_shape[0][-1]\nFE_inchannels = Encoder.get_layer(index=0).output_shape[0][-1]\n\nInit_Layers = [Init_Layer(in_channels=i.shape[3], channels=FE_inchannels) for i in tf.data.experimental.get_structure(train_dataset)[:3]]\nInit_Layers += [Init_Layers[0]]\n\nClassifiers = [Classifier(in_channels=FE_outchannels, out_channels=OUTPUT_CHANNELS) for _ in range(len(tf.data.experimental.get_structure(train_dataset)))]\n#Classifiers = [Att_Classifier(in_channels=FE_outchannels, out_channels=OUTPUT_CHANNELS, n=64) for _ in range(len(tf.data.experimental.get_structure(train_dataset)))]\n\ncombinations = [(0,1),(0,2),(0,3),\n (1,0),(1,2),\n (2,0),(2,1),\n (3,0)]\n\n#%%\nclass MemoryMoCo(tf.keras.layers.Layer):\n def __init__(self, n_views, q_size=1024, T=0.07, combinations=[]):\n super(MemoryMoCo, self).__init__()\n #self.input_size = input_size\n self.n_views = n_views\n self.q_size = q_size\n self.T = T\n \n if len(combinations)==0:\n self.combinations = []\n for i in range(self.n_views):\n for j in np.arange(self.n_views):\n if i!=j:\n self.combinations.append((i,j))\n else:\n self.combinations = combinations\n \n \n def build(self, input_shape):\n self.input_size = input_shape[0][1]\n \n \n def call(self, X, Mem):\n batch = X[0].shape[0]\n \n out0 = []\n Xn = [tf.linalg.norm(i, axis=1, keepdims=True) for i in X]\n Mn = [tf.repeat(tf.linalg.norm(i, axis=2), batch, axis=0) for i in Mem]\n for n,a in enumerate(self.combinations):\n i,j = a\n \n M = tf.concat((tf.expand_dims(X[j], 1), tf.repeat(Mem[j], batch, axis=0)), axis=1, name=f'concat_{i}{j}')\n temp = tf.squeeze(tf.matmul(M, tf.expand_dims(X[i], -1)))\n N = Xn[i] * tf.concat((Xn[j], Mn[j]), axis=1)\n temp = tf.math.exp(temp / N / self.T)\n out0.append(temp)\n \n out1 = []\n for 
n,a in enumerate(self.combinations):\n i,j = a\n M = []\n for k in np.arange(batch):\n M.append(tf.expand_dims(tf.concat((X[j][k:,:], X[j][:k,:]), axis=0), 0))\n M = tf.concat(M, axis=0)\n temp = tf.squeeze(tf.matmul(M, tf.expand_dims(X[i], -1)))\n N = Xn[i] * tf.linalg.norm(M, axis=2)\n temp = tf.math.exp(temp / N / self.T)\n out1.append(temp)\n \n return out0, out1\n\n\nQ = 10*BATCH_SIZE\nMemory = MemoryMoCo(len(Classifiers), q_size=Q, combinations=combinations)\n\n\ndef NCEloss(x, q_size):\n q_size = tf.cast(q_size, tf.float32)\n eps = 1e-7\n bsz = tf.cast(tf.shape(x)[0], tf.float32)\n \n Pn = q_size / (q_size+1)\n \n P_pos = x[:, 0]\n log_D1 = tf.math.log(P_pos / (P_pos + Pn + eps) + eps)\n \n P_neg = x[:, 1:]\n log_D0 = tf.math.log(tf.ones_like(P_neg) * Pn / (P_neg + Pn + eps) + eps)\n \n loss = - (tf.math.reduce_sum(log_D1) + tf.math.reduce_sum(log_D0)) / bsz\n loss = tf.clip_by_value(loss, -1e10, 1e10)\n return loss\n\n\n\n\n#%%\n\nEncoder_ema = tf.keras.models.clone_model(Encoder)\n# =============================================================================\n# ## Keras Clone Model does not support grouped convolutions properly. Need to rebuild the model and copy variable values over.\n# Encoder_ema = FeatureExtractor_regnet(D, W, G)\n# for i,_ in enumerate(Encoder_ema.variables):\n# Encoder_ema.variables[i].assign(Encoder.variables[i].value())\n# =============================================================================\nEncoder_ema.trainable = False\n\n\nInit_Layers_ema = [tf.keras.models.clone_model(i) for i in Init_Layers]\nClassifiers_ema = [tf.keras.models.clone_model(i) for i in Classifiers]\n\n\n\noptimizer = tf.keras.optimizers.Adam(1e-4, 0.9, clipnorm=100)\n\ncheckpoint_dir = r'ckpt'\nif not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, Encoder=Encoder, Encoder_ema=Encoder_ema)\ncheckpoint.Classifiers = Classifiers\ncheckpoint.Classifiers_ema = Classifiers_ema\ncheckpoint.Init_Layers = Init_Layers\ncheckpoint.Init_Layers_ema = Init_Layers_ema\n\nmanager = tf.train.CheckpointManager(checkpoint, directory=checkpoint_dir, max_to_keep=5, keep_checkpoint_every_n_hours=2)\nstatus = checkpoint.restore(manager.latest_checkpoint)\n\nfor i in range(len(Classifiers_ema)):\n Classifiers_ema[i].trainable=False\n Init_Layers_ema[i].trainable=False\n\n\n#%%\ndef test_model(in_channels=3):\n x_init = tf.keras.layers.Input(shape=[None, None, in_channels])\n x = Init_Layers[0](x_init)\n x = Encoder(x)\n return tf.keras.Model(inputs=x_init, outputs=x)\nTestModel = test_model(in_channels=Init_Layers[0].get_layer(index=0).output_shape[0][-1])\n\n\n#%%\n@tf.function\ndef train_step(inputs, Mem):\n batch = inputs[0].shape[0]\n with tf.GradientTape() as grad_tape:\n \n x = [Init_Layers[i](inputs[i]) for i in range(len(inputs))]\n \n x0 = [Encoder(i) for i in x]\n #x0 = [tf.nn.avg_pool(i, 4, 4, padding='VALID') for i in x0] #RegNetX network has less downsampling than ResNet.\n y = [tf.stop_gradient(Encoder_ema(i)) for i in x]\n \n y = [tf.stop_gradient(Classifiers_ema[i](Encoder_ema(x[i]))) for i in range(len(x))]\n x = [Classifiers[i](x0[i]) for i in range(len(x0))]\n \n Mem = [tf.expand_dims(i, axis=0) for i in Mem]\n l_inter, l_intra = Memory(x, Mem)\n loss = [NCEloss(i, Q) for i in l_inter] + [NCEloss(i, batch) for i in l_intra]\n if tf.math.reduce_any([(tf.math.is_nan(i) or tf.math.is_inf(i)) for i in loss]):\n tf.print(loss)\n Loss = 
tf.math.reduce_sum(loss)\n \n V = Encoder.trainable_variables\n for i in set(Init_Layers + Classifiers):\n V += i.trainable_variables\n grad = grad_tape.gradient(Loss, V)\n if tf.math.is_inf(tf.linalg.global_norm(grad)):\n grad = [tf.clip_by_value(i, -1e16, 1e16) for i in grad]\n optimizer.apply_gradients(zip(grad, V))\n \n m = 0.99\n for j in range(len(Encoder.variables)):\n Encoder_ema.variables[j] = m*Encoder_ema.variables[j] + (1-m)*Encoder.variables[j]\n \n for i in range(len(Init_Layers)):\n for j in range(len(Init_Layers[i].variables)):\n Init_Layers_ema[i].variables[j] = m*Init_Layers_ema[i].variables[j] + (1-m)*Init_Layers[i].variables[j]\n \n for i in range(len(Classifiers)):\n for j in range(len(Classifiers[i].variables)):\n Classifiers_ema[i].variables[j] = m*Classifiers_ema[i].variables[j] + (1-m)*Classifiers[i].variables[j]\n \n return y, loss, x0[0]\n\n\ndef fit(train_ds, epochs):\n datpath = 'data'\n if not os.path.isdir(datpath):\n os.makedirs(datpath)\n \n Mem = [[]]*len(Classifiers)\n for inputs in train_ds.take(np.ceil(Q/BATCH_SIZE)):\n for i in range(len(Classifiers)):\n Mem[i] += [Classifiers_ema[i](Encoder_ema(Init_Layers_ema[i](inputs[i]))).numpy()]\n Mem = [np.vstack(i)[-Q:,:] for i in Mem]\n \n for epoch in range(epochs):\n start = time.time()\n \n # Train\n loss = []\n x = []\n im = []\n for inputs in train_ds:\n im.append(np.array((inputs[0]+1)*127.5).astype('uint8'))\n nMem, l, emb = train_step(inputs, Mem)\n \n for i in range(len(Mem)):\n Mem[i] = np.vstack([Mem[i], nMem[i].numpy()])[-Q:,:]\n \n l = np.array(l)\n loss.append(np.array(l))\n x.append(np.mean(np.array(emb), axis=(1,2)))\n \n loss = np.array(loss)\n pd.DataFrame(loss).to_csv(os.path.join(datpath, f'L{epoch:02d}.csv'), index=None, header=None)\n print('Epoch {} took {} min'.format(epoch, np.round((time.time()-start)/60, 2)))\n print(np.median(loss, axis=0))\n \n pd.DataFrame(np.vstack(x)).to_csv(os.path.join(datpath, f'Xt{epoch:02d}.csv'), index=None, header=None)\n im = np.vstack(im)\n imout = np.zeros((100*256, 50*256, 3), dtype='uint8')\n for i in range(100):\n for j in range(50):\n imout[256*i:256*(i+1), 256*j:256*(j+1), :] = im[i*50+j,:,:,::-1]\n cv2.imwrite(os.path.join(datpath, f'It{epoch:02d}.png'), imout)\n \n if np.mod(epoch+1, 2)==0 or epoch==epochs-1:\n x = []\n im = []\n for inputs in test_dataset:\n im.append(np.array((inputs+1)*127.5).astype('uint8'))\n emb = TestModel(inputs)\n \n x.append(np.mean(np.array(emb), axis=(1,2)))\n #emb = tf.nn.avg_pool(emb, 4, 4, padding='VALID')\n #x.append(np.reshape(np.array(emb), (BATCH_SIZE, -1)))\n \n x = np.vstack(x)\n pd.DataFrame(x).to_csv(os.path.join(datpath, f'X{epoch:02d}.csv'), index=None, header=None)\n \n im = np.vstack(im)\n imout = np.zeros((100*256, 50*256, 3), dtype='uint8')\n for i in range(100):\n for j in range(50):\n imout[256*i:256*(i+1), 256*j:256*(j+1), :] = im[i*50+j,:,:,::-1]\n cv2.imwrite(os.path.join(datpath, f'I{epoch:02d}.png'), imout)\n \n \n if np.mod(epoch+1, 1) == 0:\n manager.save()\n if not os.path.isdir('encoders'):\n os.makedirs('encoders')\n TestModel.save(f'encoders\\Encoder{epoch:02d}', include_optimizer=False)\n \n if np.any(np.isnan(loss)):\n break\n \n return x, im\n\n#%%\nEPOCHS = 20\nX, IM = fit(train_dataset, EPOCHS)\n\n\n#%%\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import AnnotationBbox, OffsetImage\nfrom umap import UMAP\n\nU0 = UMAP(n_neighbors=15).fit(X)\nU = U0.embedding_\n\nplt.scatter(U[:,0],U[:,1],c='k')\nax = plt.gca()\nfor i in np.random.choice(IM.shape[0], 800, 
replace=False):\n im = cv2.resize(IM[i,:,:,:], (150,150))\n imagebox = OffsetImage(im, zoom=0.25)\n imagebox.image.axes = ax\n ab = AnnotationBbox(imagebox, U[i,:2], xybox=(0, 0), xycoords='data', boxcoords=\"offset points\", pad=0)\n ax.add_artist(ab)\n\n","repo_name":"tpapp157/Contrastive_Multiview_Coding-Momentum","sub_path":"CMC_combined.py","file_name":"CMC_combined.py","file_ext":"py","file_size_in_byte":17460,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"2"} +{"seq_id":"5714426467","text":"# -*- coding: utf-8 -*-\n#设计一个算法,计算出n阶乘中尾部零的个数\n#O(logN)的时间复杂度\nclass Solution:\n \"\"\"\n @param: n: An integer\n @return: An integer, denote the number of trailing zeros in n!\n \"\"\"\n def trailingZeros(self, n):\n # write your code here, try to do it without arithmetic operators.\n zeronum = 0\n while n >= 5:\n zeronum = zeronum + int(n / 5)\n n = int(n / 5)\n return zeronum\n\n\ns1 = Solution()\nprint(s1.trailingZeros(101))","repo_name":"xiaoyuehui/LintCode","sub_path":"easy/Trailing_Zeros.py","file_name":"Trailing_Zeros.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70172487088","text":"table = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16, 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24, 'Y': 25, 'Z': 26, '.': 27, ',': 28, '!': 29, '?': 30, ' ': 31, 'Ç': 32, 'Á': 33, 'É': 34, 'Í': 35, 'Ó': 36, 'Ú': 37, 'Â': 38, 'Ê': 39, 'Ô': 40}\n\ncoding_matrix = [[4, 1], \n [3, 1]]\n\ndecoder_matrix = [[1, -1], \n [-3, 4]]\n\nprint('=-' * 40)\nmessage = input('\\033[1;32;40mEnter the message to be encrypted:\\33[m \\n').upper()\n\nwhile len(message) < 4:\n message = input('[ERROR] Enter more than 4 characters: ').upper()\n\ndef original_message():\n message_matrix = []\n letters = list(message)\n for letter in letters:\n if letter in table:\n message_matrix.append(table[letter])\n\n else: \n message_matrix.append(0)\n\n if len(message) % 2 != 0:\n message_matrix.append(table[' '])\n \n return message_matrix\n\ndef message_encoding():\n message_matrix = original_message()\n half_list = len(message_matrix) // 2\n first_line = message_matrix[:half_list]\n second_line = message_matrix[half_list:]\n encoded_first_line = []\n encoded_second_line = []\n\n for i, num in enumerate(first_line):\n encoded_num_ft = coding_matrix[0][0] * first_line[i] + coding_matrix[0][1] * second_line[i]\n encoded_first_line.append(encoded_num_ft)\n\n for i, num in enumerate(second_line):\n encoded_num_sd = coding_matrix[1][0] * first_line[i] + coding_matrix[1][1] * second_line[i]\n encoded_second_line.append(encoded_num_sd)\n\n\n return encoded_first_line, encoded_second_line\n\n\ndef message_decoding():\n encoded_first_line, encoded_second_line = message_encoding()\n joint = []\n decoded_message = []\n \n for i, num in enumerate(encoded_first_line):\n decoded_num_ft = decoder_matrix[0][0] * encoded_first_line[i] + decoder_matrix[0][1] * encoded_second_line[i]\n joint.append(decoded_num_ft)\n\n for i, num in enumerate(encoded_second_line):\n decoded_num_sd = decoder_matrix[1][0] * encoded_first_line[i] + decoder_matrix[1][1] * encoded_second_line[i]\n joint.append(decoded_num_sd)\n\n for num in joint:\n for key, value in table.items():\n if value == num:\n decoded_message.append(key)\n \n return decoded_message\n\n\noption = 0\ndef options(option):\n\n if option == 
1:\n print('\\n\\033[1;30;46mMensagem Original:\\33[m\\n')\n print(original_message())\n\n elif option == 2:\n print('\\n\\033[1;30;41mMensagem Codificada:\\33[m\\n')\n print(message_encoding())\n elif option == 3:\n print('\\n\\033[1;30;42mMensagem Decodificada:\\33[m\\n')\n print(message_decoding())\n elif option == 4:\n print('leaving the program...')\n else:\n print('\\033[1;31;40m[ERROR] Option not found.\\33[m')\n\n\nwhile option != 4:\n\n print('''\n [1] See original message\n [2] Encode message\n [3] Decode message\n [4] Exit\n ''')\n\n try:\n option = int(input('Enter your option: '))\n options(option)\n except:\n print('\\033[1;31;40m[ERROR] Enter the numeric value.\\33[m')","repo_name":"dudaborges/cryptography_backend","sub_path":"table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"857784800","text":"\"\"\"\nList active tags and aggregations returns \"Success\" response\n\"\"\"\n\nfrom datadog_api_client import ApiClient, Configuration\nfrom datadog_api_client.v2.api.metrics_api import MetricsApi\n\nconfiguration = Configuration()\nwith ApiClient(configuration) as api_client:\n api_instance = MetricsApi(api_client)\n response = api_instance.list_active_metric_configurations(\n metric_name=\"static_test_metric_donotdelete\",\n )\n\n print(response)\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"examples/v2/metrics/ListActiveMetricConfigurations.py","file_name":"ListActiveMetricConfigurations.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"} +{"seq_id":"43824208001","text":"from datetime import datetime\n\nimport requests\n\n\nclass CoWinClient(object):\n home_url = 'https://www.cowin.gov.in/'\n registration_url = 'https://selfregistration.cowin.gov.in/'\n date_str = datetime.now().strftime('%d-%m-%Y')\n\n def __init__(self, district_id):\n self.district_id = district_id\n self.url = (f'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?'\n f'district_id={self.district_id}&date={self.date_str}')\n\n def get_under_45_capacity(self):\n \"\"\"\n Fetches data from CoWin portal for self.district_id and returns a dict with dose 1, dose 2 and\n total availability\n :return: dict, e.g. 
{ 'total': 100, 'dose_1': 55, 'dose_2': 45 }\n \"\"\"\n headers = self.get_request_headers()\n res = requests.get(self.url, headers=headers)\n res_json = res.json()\n all_centers = res_json.get('centers') or []\n under_45_name_suffix = '18 to 44'\n under_45_centers = [center for center in all_centers\n if under_45_name_suffix in center['name'].lower()]\n print(f'Total under 45 centers: {len(under_45_centers)}')\n total_capacity = {\n 'total': 0,\n 'dose_1': 0,\n 'dose_2': 0,\n }\n for center in under_45_centers:\n center_capacity = {\n 'total': 0,\n 'dose_1': 0,\n 'dose_2': 0,\n }\n for session in center['sessions']:\n center_capacity['total'] += session['available_capacity']\n center_capacity['dose_1'] += session['available_capacity_dose1']\n center_capacity['dose_2'] += session['available_capacity_dose2']\n total_capacity['total'] += center_capacity['total']\n total_capacity['dose_1'] += center_capacity['dose_1']\n total_capacity['dose_2'] += center_capacity['dose_2']\n print(f\"{center['name']}: {center_capacity}\")\n return total_capacity\n\n def get_capacity_for_minimum_age(self, min_age):\n headers = self.get_request_headers()\n res = requests.get(self.url, headers=headers)\n res_json = res.json()\n all_centers = res_json.get('centers') or []\n print(f'Total centers: {len(all_centers)}')\n total_capacity = {\n 'total': 0,\n 'dose_1': 0,\n 'dose_2': 0,\n }\n total_number_of_centers = 0\n for center in all_centers:\n center_capacity = {\n 'total': 0,\n 'dose_1': 0,\n 'dose_2': 0,\n }\n for session in center['sessions']:\n if session['min_age_limit'] <= min_age:\n center_capacity['total'] += session['available_capacity']\n center_capacity['dose_1'] += session['available_capacity_dose1']\n center_capacity['dose_2'] += session['available_capacity_dose2']\n if center_capacity['total']:\n total_number_of_centers += 1\n print(f'Center name: {center[\"name\"]}; Capacity: {center_capacity}')\n total_capacity['total'] += center_capacity['total']\n total_capacity['dose_1'] += center_capacity['dose_1']\n total_capacity['dose_2'] += center_capacity['dose_2']\n print(f'# of centers with min age {min_age}: {total_number_of_centers}')\n return total_capacity\n\n @staticmethod\n def get_request_headers():\n headers = {\n 'authority': 'cdn-api.co-vin.in',\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"90\", \"Google Chrome\";v=\"90\"',\n 'accept': 'application/json, text/plain, */*',\n 'sec-ch-ua-mobile': '?0',\n 'user-agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/90.0.4430.93 Safari/537.36'),\n 'origin': 'https://www.cowin.gov.in',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://www.cowin.gov.in/',\n 'accept-language': 'en_US,en;q=0.9',\n 'if-none-match': 'W/\"15514-BSMeW8/8h2KpNSRQYKyYKhG3wEg\"',\n }\n return headers\n\n\nif __name__ == '__main__':\n lucknow_district_id = 670\n co_win_client = CoWinClient(lucknow_district_id)\n capacity = co_win_client.get_under_45_capacity()\n # capacity = co_win_client.get_capacity_for_minimum_age(20)\n print(f'Total under 45 capacity: {capacity}')\n","repo_name":"animesh21/covid-vaccine-alerts","sub_path":"cowin_client.py","file_name":"cowin_client.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11635896654","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import 
LinearRegression\r\nfrom sklearn.metrics import DistanceMetric\r\nimport io\r\nimport base64\r\nfrom datetime import timedelta\r\nimport mysql.connector\r\n\r\nimport os\r\nfrom flask import (\r\n Flask, session, redirect, url_for,\r\n render_template, request\r\n)\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'mysecretkey'\r\n\r\napp.permanent_session_lifetime = timedelta(hours=1)\r\n\r\n# Load the crime data from the CSV file\r\ncrime_data = pd.read_csv('crime_data.csv')\r\nBING_MAPS_API_KEY = 'AhETvb0ezYzJJ_GeTfKGSUKRFaZJoFHYD7beSs1n1EZxmU_LqFk1U4vc2rj9Pdhk'\r\n\r\n\r\n\r\n# Function to authenticate username and password\r\ndef authenticate(username, password):\r\n # Here, we are using a local MySQL database to store the credentials\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n database=\"mydatabase\"\r\n )\r\n\r\n mycursor = mydb.cursor()\r\n sql = \"SELECT * FROM users WHERE username = %s AND password = %s\"\r\n val = (username, password)\r\n mycursor.execute(sql, val)\r\n result = mycursor.fetchone()\r\n\r\n if result:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\n@app.route('/login', methods=['POST', 'GET'])\r\ndef login():\r\n if request.method == 'GET':\r\n if session.get('is_logged_in'):\r\n return redirect(url_for('dashboard'))\r\n else:\r\n return render_template('login.html')\r\n\r\n elif request.method == 'POST':\r\n user_name = request.form.get('username')\r\n passwd = request.form.get('password')\r\n\r\n if authenticate(user_name, passwd):\r\n session['is_logged_in'] = True\r\n session.permanent = True\r\n return redirect(url_for('dashboard'))\r\n else:\r\n return render_template('login.html', cant_authenticate=True)\r\n\r\n\r\n@app.route('/dashboard', methods=['GET', 'POST'])\r\ndef dashboard():\r\n if request.method == 'GET':\r\n if session.get('is_logged_in'):\r\n return render_template('dashboard.html')\r\n else:\r\n return redirect(url_for('login'))\r\n\r\n elif request.method == 'POST':\r\n if session.get('is_logged_in'):\r\n file = request.files['file']\r\n\r\n # Save the uploaded file to the root directory\r\n file.save(os.path.join(app.root_path, 'crime_data.csv'))\r\n return 'File uploaded successfully'\r\n else:\r\n return redirect(url_for('login'))\r\n\r\n@app.route('/logout', methods=['POST'])\r\ndef logout():\r\n if request.get_json():\r\n session['is_logged_in'] = False\r\n\r\n # specify in which location you want go after you logout\r\n resp = {\r\n \"resp\": url_for('login')\r\n }\r\n return resp\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n # crime_rate = 3.05\r\n # latitude = 10.516\r\n # longitude = 76.2157\r\n crime_rate = 0\r\n latitude = 0\r\n longitude = 0\r\n\r\n base64_scatter_plot = open('./static/assets/blank_scatter_plot_base64_str.txt', 'r').read()\r\n base64_scatter_plot = 'data:image/png;base64,' + base64_scatter_plot\r\n\r\n base64_bar_chart = open('./static/assets/blank_bar_chart_base64_str.txt', 'r').read()\r\n base64_bar_chart = 'data:image/png;base64,'+ base64_bar_chart\r\n\r\n if request.method == 'POST':\r\n # Extract the latitude and longitude from the form data\r\n data = request.get_json()\r\n latitude = float(data.get('latitude'))\r\n longitude = float(data.get('longitude'))\r\n\r\n # Find the 10 closest locations to the input latitude and longitude\r\n dist = DistanceMetric.get_metric('haversine')\r\n distances = dist.pairwise(\r\n crime_data[['latitude', 'longitude']], [(latitude, longitude)]\r\n )[:, 0]\r\n crime_data['distance'] = distances\r\n 
closest_locations = crime_data.sort_values('distance').iloc[:10]\r\n\r\n # Train a linear regression model on the closest locations\r\n model = LinearRegression()\r\n model.fit(\r\n pd.DataFrame(closest_locations, columns=['latitude', 'longitude']),\r\n closest_locations['crime_rate']\r\n )\r\n\r\n # Predict the crime rate for the input location\r\n\r\n crime_rate = round(model.predict([[latitude, longitude]])[0], 2)\r\n # Create the scatter plot\r\n plt.figure(figsize=(8, 6))\r\n plt.title('Crime Rate in Kerala')\r\n plt.scatter(crime_data['longitude'], crime_data['latitude'],\r\n c=crime_data['crime_rate'], cmap='plasma')\r\n plt.xlabel('Longitude')\r\n plt.ylabel('Latitude')\r\n\r\n # Add a green marker for the input location with the predicted crime rate\r\n plt.scatter(\r\n longitude, latitude, c='green', s=100, label=f'Predicted crime rate: {crime_rate}'\r\n )\r\n plt.legend()\r\n\r\n # Save the plot to memory\r\n buf = io.BytesIO()\r\n plt.savefig(buf, format='png')\r\n buf.seek(0)\r\n\r\n # Encode the bytes object to Base64\r\n base64_scatter_plot = base64.b64encode(buf.getvalue()).decode('utf-8')\r\n base64_scatter_plot = 'data:image/png;base64,' + base64_scatter_plot\r\n\r\n # Create the bar chart\r\n plt.figure(figsize=(8, 6))\r\n plt.title('Crime Rate by Location: Nearest 10 Locations')\r\n bars = plt.bar(\r\n range(len(closest_locations)), closest_locations['crime_rate'], color='purple'\r\n )\r\n plt.xticks(range(len(closest_locations)), closest_locations.index)\r\n plt.xlabel('Location')\r\n plt.ylabel('Crime Rate')\r\n\r\n # Add a legend to explicitly show which bar represents your location\r\n bars[0].set_color('green')\r\n plt.legend([bars[0]], ['Your location'])\r\n\r\n # Save the plot to memory\r\n buf = io.BytesIO()\r\n plt.savefig(buf, format='png')\r\n buf.seek(0)\r\n\r\n # Encode the bytes object to Base64\r\n base64_bar_chart = base64.b64encode(buf.getvalue()).decode('utf-8')\r\n base64_bar_chart = 'data:image/png;base64,'+base64_bar_chart\r\n\r\n context = {\r\n 'latitude': latitude, 'longitude': longitude, 'bing_maps_api_key': BING_MAPS_API_KEY\r\n }\r\n # Render the results page with the plots\r\n # return render_template('index.html', crime_rate=crime_rate, **context)\r\n resp = {\r\n 'result': render_template(\r\n 'index.html', crime_rate=crime_rate, base64_scatter_plot=base64_scatter_plot,\r\n base64_bar_plot=base64_bar_chart, **context\r\n )\r\n }\r\n return resp\r\n\r\n context = {\r\n 'latitude': latitude, 'longitude': longitude, 'bing_maps_api_key': BING_MAPS_API_KEY\r\n }\r\n # If the method is GET, render the input form page\r\n return render_template(\r\n 'index.html', crime_rate=crime_rate, base64_scatter_plot=base64_scatter_plot,\r\n base64_bar_plot=base64_bar_chart, **context\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"penguinlips/safe_location","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15145308231","text":"# -*- coding: utf-8 -*-\r\n\r\nimport re, urllib\r\n\r\nfrom platformcode import config, logger\r\nfrom core.item import Item\r\nfrom core import httptools, scrapertools, servertools, tmdb\r\n\r\nhost = 'https://retroseriestv.com/'\r\n\r\n\r\ndef mainlist(item):\r\n return mainlist_series(item)\r\n\r\ndef mainlist_series(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n itemlist.append(item.clone( title='Todas las series', action='list_all', url=host + 
'seriestv/' ))\r\n\r\n itemlist.append(item.clone( title='Series por género', action='generos' ))\r\n itemlist.append(item.clone( title='Series por año de lanzamiento', action='anyos' ))\r\n\r\n itemlist.append(item.clone( title = 'Buscar serie ...', action = 'search', search_type = 'tvshow' ))\r\n\r\n return itemlist\r\n\r\n\r\ndef generos(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n data = httptools.downloadpage(host + 'seriestv/').data\r\n\r\n patron = '
  • ([^<]+) ([^<]+)
  • '\r\n matches = re.compile(patron, re.DOTALL).findall(data)\r\n\r\n for url, title, cantidad in matches:\r\n if cantidad == '0': continue\r\n itemlist.append(item.clone( action=\"list_all\", title='%s (%s)' % (title, cantidad), url=url ))\r\n\r\n return itemlist\r\n\r\ndef anyos(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n data = httptools.downloadpage(host + 'seriestv/').data\r\n\r\n patron = '' in data: data = data.split('
    ')[0]\r\n\r\n matches = re.compile('', re.DOTALL).findall(data)\r\n for data_show in matches:\r\n thumb = scrapertools.find_single_match(data_show, ' src=\"([^\"]+)')\r\n url, title = scrapertools.find_single_match(data_show, '

    ([^<]+)')\r\n year = scrapertools.find_single_match(data_show, '(\\d{4})')\r\n if not year: year = '-'\r\n plot = scrapertools.find_single_match(data_show, '
    (.*?)
    ')\r\n \r\n itemlist.append(item.clone( action='temporadas', url=url, title=title, thumbnail=thumb, \r\n contentType='tvshow', contentSerieName=title, \r\n infoLabels={'year': year, 'plot': scrapertools.htmlclean(plot)} ))\r\n\r\n tmdb.set_infoLabels(itemlist)\r\n\r\n next_page = scrapertools.find_single_match(data, ' href=\"([^\"]+)\"\\s*>Temporada (\\d+)\", re.DOTALL).findall(data)\r\n for numtempo in matches:\r\n itemlist.append(item.clone( action='episodios', title='Temporada %s' % numtempo, \r\n contentType='season', contentSeason=numtempo ))\r\n \r\n tmdb.set_infoLabels(itemlist)\r\n\r\n return itemlist\r\n\r\n\r\n# Si una misma url devuelve los episodios de todas las temporadas, definir rutina tracking_all_episodes para acelerar el scrap en trackingtools.\r\ndef tracking_all_episodes(item):\r\n return episodios(item)\r\n\r\ndef episodios(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n data = httptools.downloadpage(item.url).data\r\n\r\n matches = re.compile(\"
  • (.*?)
  • \", re.DOTALL).findall(data)\r\n for data_epi in matches:\r\n # ~ logger.debug(data_epi)\r\n\r\n try:\r\n season, episode = scrapertools.find_single_match(data_epi, \"
    (\\d+)\\s*-\\s*(\\d+)\")\r\n except:\r\n continue\r\n\r\n if item.contentSeason and item.contentSeason != int(season):\r\n continue\r\n\r\n thumb = scrapertools.find_single_match(data_epi, \" src='([^']+)\")\r\n url, title = scrapertools.find_single_match(data_epi, \" href='([^']+)'>([^<]+)\")\r\n titulo = '%sx%s %s' % (season, episode, title)\r\n\r\n itemlist.append(item.clone( action='findvideos', url=url, title=titulo, thumbnail=thumb, \r\n contentType='episode', contentSeason=season, contentEpisodeNumber=episode ))\r\n\r\n tmdb.set_infoLabels(itemlist)\r\n\r\n return itemlist\r\n\r\n\r\ndef detectar_server(servidor):\r\n #TODO? verificar servers\r\n srv = servidor.lower()\r\n if srv == 'waaw' or srv == 'netu': srv = 'netutv'\r\n return srv\r\n\r\ndef findvideos(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n IDIOMAS = {'es':'Esp', 'mx':'Lat', 'ar':'Lat', 'pe':'Lat', 'en':'Eng', 'gb':'Eng', 'vose':'VOSE', 'vos':'VOS', 'fr':'Fra', 'jp':'Jap'}\r\n\r\n data = httptools.downloadpage(item.url).data\r\n\r\n patron = \"data-type='([^']+)' data-post='(\\d+)' data-nume='(\\d+)'.*?([^<]+).*?([^.<]+).*?img src='([^']+)'\"\r\n matches = re.compile(patron, re.DOTALL).findall(data)\r\n if not matches:\r\n patron = 'data-type=\"([^\"]+)\" data-post=\"(\\d+)\" data-nume=\"(\\d+)\".*?([^<]+).*?([^.<]+).*?img src=\\'([^\\']+)\\''\r\n matches = re.compile(patron, re.DOTALL).findall(data)\r\n \r\n for dtype, dpost, dnume, titulo, servidor, lang in matches:\r\n lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?)\\.png')\r\n\r\n itemlist.append(Item( channel = item.channel, action = 'play', server = detectar_server(servidor),\r\n title = '', dtype = dtype, dpost = dpost, dnume = dnume, \r\n language = IDIOMAS.get(lang, lang)\r\n ))\r\n\r\n return itemlist\r\n\r\n\r\ndef play(item):\r\n logger.info()\r\n itemlist = []\r\n\r\n post = urllib.urlencode( {'action': 'doo_player_ajax', 'post': item.dpost, 'nume': item.dnume, 'type': item.dtype} )\r\n data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data\r\n\r\n url = scrapertools.find_single_match(data, \"src='([^']+)'\")\r\n\r\n if url.startswith(host):\r\n locationurl = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get('location', '')\r\n if locationurl != '':\r\n try:\r\n b64url = scrapertools.find_single_match(locationurl, \"y=(.*)\")\r\n if b64url != '': url = base64.b64decode(b64url)\r\n else: url = locationurl\r\n except:\r\n url = locationurl\r\n \r\n if url != '': \r\n itemlist.append(item.clone(url = url))\r\n\r\n return itemlist\r\n\r\n\r\n\r\ndef search(item, texto):\r\n logger.info(\"texto: %s\" % texto)\r\n itemlist = []\r\n\r\n try:\r\n data = httptools.downloadpage(host + '?s=' + texto.replace(\" \", \"+\")).data\r\n if '
    ' in data: data = data.split('
    ')[0]\r\n\r\n matches = re.compile('
    (.*?)
    ', re.DOTALL).findall(data)\r\n for data_show in matches:\r\n thumb = scrapertools.find_single_match(data_show, ' src=\"([^\"]+)')\r\n url, title = scrapertools.find_single_match(data_show, '
    ([^<]+)')\r\n year = scrapertools.find_single_match(data_show, '(\\d{4})')\r\n if not year: year = '-'\r\n plot = scrapertools.find_single_match(data_show, '

    (.*?)

    ').strip()\r\n \r\n itemlist.append(item.clone( action='temporadas', url=url, title=title, thumbnail=thumb, \r\n contentType='tvshow', contentSerieName=title, \r\n infoLabels={'year': year, 'plot': scrapertools.htmlclean(plot)} ))\r\n\r\n tmdb.set_infoLabels(itemlist)\r\n return itemlist\r\n\r\n except:\r\n import sys\r\n for line in sys.exc_info():\r\n logger.error(\"%s\" % line)\r\n return []\r\n","repo_name":"gacj22/WizardGacj22","sub_path":"plugin.video.balandro/channels/retroseriestv.py","file_name":"retroseriestv.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"557570222","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 13 09:56:08 2023\n\n@author: pokor076\n\"\"\"\nimport numpy as np\ndef create_grating(size, orientation_rad, spatial_frequency, phase, \n radius, amplitude):\n # Create 2D coordinate grid\n x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))\n # Compute distance from center of image\n x_center, y_center = size[0] / 2, size[1] / 2\n r = np.sqrt((x - x_center)**2 + (y - y_center)**2)\n # Create circular mask\n mask = np.zeros(size)\n mask[r < radius] = 1\n # Compute grating\n k = 2*np.pi*spatial_frequency\n grating = amplitude * np.sin(k * (np.cos(orientation_rad)*x + np.sin(orientation_rad)*y) + phase)\n # Apply circular mask\n masked_grating = grating * mask\n #vgg16 needs a 3 dimensional RGB image so we'll just concatenate the image to \n #itself 3 times and shuffle around the dimensions\n img = np.expand_dims(masked_grating, axis=0)\n img= np.concatenate((img, img, img),axis = 0)\n img = np.moveaxis(img,0,2)\n img = np.expand_dims(img, axis=0)\n # Display grating\n return img\n ","repo_name":"vpokorny123/DCNN_illusions","sub_path":"local_functions/create_grating.py","file_name":"create_grating.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28838444055","text":"\"\"\"A module of different graph types and other properties.\"\"\"\nfrom __future__ import annotations\n\nimport functools\nimport json\nfrom abc import ABC, abstractmethod\nfrom os.path import abspath, dirname, join\nfrom types import TracebackType\nfrom typing import Callable, Dict, Hashable, Iterable, Type\n\nimport jsonschema\n\nfrom pyaestro.bases import Specifiable\nfrom pyaestro.dataclasses import GraphEdge\nfrom pyaestro.typing import Comparable\n\nSCHEMA_DIR = join(dirname(dirname(abspath(__file__))), \"_schemas\")\n\n\nclass Graph(Specifiable, ABC):\n \"\"\"An abstact interface for building Graph data structures.\"\"\"\n\n with open(join(SCHEMA_DIR, \"graph.json\")) as schema:\n _dict_schema = json.load(schema)\n\n @classmethod\n def _read_only(cls, method: Callable):\n @functools.wraps(method)\n def locked_function(*args, **kwargs):\n self = args[0]\n if self._locked:\n raise RuntimeError(\n f\"Unable to call method '{method.__name__}' while in \"\n \"read-only context.\"\n )\n return method(*args, **kwargs)\n\n return locked_function\n\n @classmethod\n def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n cls.__setitem__ = cls._read_only(cls.__setitem__)\n cls.__delitem__ = cls._read_only(cls.__delitem__)\n cls.remove_edge = cls._read_only(cls.remove_edge)\n cls.add_edge = cls._read_only(cls.add_edge)\n cls.delete_edges = cls._read_only(cls.delete_edges)\n\n def __init__(self):\n self._vertices = {}\n 
self._locked = False\n\n def __contains__(self, key: Hashable) -> bool:\n return self._vertices.__contains__(key)\n\n def __getitem__(self, key):\n try:\n return self._vertices[key]\n except KeyError as key_error:\n raise KeyError(f\"Key '{key_error.args[0]}' not found in graph.\")\n\n def __setitem__(self, key: Hashable, value: object) -> None:\n self._vertices[key] = value\n\n def __delitem__(self, key: Hashable) -> None:\n try:\n self.delete_edges(key)\n del self._vertices[key]\n except KeyError as key_error:\n raise KeyError(f\"Key '{key_error.args[0]}' not found in graph.\")\n\n def __repr__(self) -> str:\n return \"{}()\".format(type(self).__name__)\n\n def __iter__(self) -> Iterable[str]:\n for vertex in self._vertices.keys():\n yield vertex\n\n def __len__(self) -> int:\n return len(self._vertices)\n\n def __enter__(self) -> Graph:\n self._locked = True\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self._locked = False\n if exc_val:\n raise exc_val\n\n @classmethod\n def from_specification(\n cls, specification: Dict[Hashable, Dict[Hashable, object]]\n ) -> Type[Graph]:\n \"\"\"Construct a Graph based on a specification of edges and vertices.\n\n Args:\n specification (Dict[Hashable, Dictionary[Hashable, object]]):\n A dictionary containing two keys:\n edges: A dictionary of neighbors for each vertex containing\n a list of (neighbor, weight) tuples.\n vertices: A dictionary mapping keys to their values.\n\n Returns:\n Type[Graph]: An instance of the type Graph.\n\n Raises:\n ValidationError: Raised when specification does not match the fixed\n schema for a Graph.\n \"\"\"\n graph = cls()\n jsonschema.validate(specification, schema=cls._dict_schema)\n\n for vertex, value in specification[\"vertices\"].items():\n graph[vertex] = value\n\n for node, neighbors in specification[\"edges\"].items():\n for neighbor, weight in neighbors:\n graph.add_edge(node, neighbor, weight)\n\n return graph\n\n @abstractmethod\n def delete_edges(self, key: Hashable) -> None:\n \"\"\"Delete all edges associated to a key from the Graph.\n\n Args:\n key (Hashable): Key to a node whose edges are to be removed.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def edges(self) -> Iterable[GraphEdge]:\n \"\"\"Iterate the edges of a graph.\n\n Returns:\n Iterable[GraphEdge]: An iterable of tuples containing edges.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def add_edge(\n self, a: Hashable, b: Hashable, weight: Comparable = 0\n ) -> None:\n \"\"\"Add an edge to the graph.\n\n Args:\n a (Hashable): Key identifying side 'a' of an edge.\n b (Hashable): Key identifying side 'b' of an edge.\n weight(Comparable): Weight of the edge between 'a' and 'b'.\n Defaults to 0 for unweighted.\n\n Raises:\n KeyError: Raised when either node 'a' or node 'b'\n do not exist in the graph.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def remove_edge(self, a: Hashable, b: Hashable) -> None:\n \"\"\"Remove an edge to the graph.\n\n Args:\n a (Hashable): Key identifying side 'a' of an edge.\n b (Hashable): Key identifying side 'b' of an edge.\n\n Raises:\n KeyError: Raised when either node 'a' or node 'b'\n do not exist in the graph.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_neighbors(self, key: Hashable) -> Iterable[GraphEdge]:\n \"\"\"Get the connected neighbors of the specified node.\n\n Args:\n a (Hashable): Key whose neighbor's should be returned.\n\n 
Raises:\n KeyError: Raised when 'key' does not exist in the graph.\n\n Returns:\n Iterable[GraphEdge]: An iterable of GraphEdge records that\n represent the neighbors of the vertex named 'key'.\n \"\"\"\n raise NotImplementedError\n","repo_name":"FrankD412/pyaestro","sub_path":"pyaestro/abstracts/graphs/_graph.py","file_name":"_graph.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"12582512953","text":"import re\nimport requests\n\nfrom getUnivData import getUnivData\n\n\nx = requests.get('https://iisma.kemdikbud.go.id/info/host-universities-list/', verify=False)\n\n# get data-elementor-lightbox-title, then find the href in the same line. put it in a list\ny = re.findall(r'data-elementor-lightbox-title=\".*?\" href=\"(.*?)\"', x.text)\n\nunivList = []\n# split by , push to list\nfor i in y:\n univList.append(i.split(','))\n\n\n\n# flatten\nunivList = [item for sublist in univList for item in sublist]\n\n# remove duplicate\nunivList = list(dict.fromkeys(univList))\n\n# print\n# result consist of university name, link, requirement, course, TOEFL iBT score, IELTS score, DET score\nresult = []\nfor i in range(len(univList)):\n print(i, univList[i])\n result.append(getUnivData(univList[i]))\n\n# write as csv with header\nimport csv\nwith open('univData.csv', 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(['univName', 'univRequirementDetail', 'univCourse', 'toefl', 'ielts', 'det', 'awardee', 'qsRanking', 'location', 'link'])\n writer.writerows(result)\n\n\n","repo_name":"christojeffrey/scraping-iisma","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20750899705","text":"import numpy as np\nimport cv2 as cv\nfrom scipy.sparse import csc_matrix\nfrom tqdm import tqdm\nimport sys\nfrom scipy.sparse import identity\nfrom scipy.sparse.linalg import inv\n\n\nclass SoftMatting:\n\tdef __init__(self, im, t, epsilon, lamb, window=1):\n\t\tself.im = im\n\t\tself.t = t\n\t\tself.window = window\n\t\tself.epsilon = epsilon\n\t\tself.lamb = lamb\n\t\tself.W, self.H, _ = im.shape\n\t\tself.N = self.W * self.H\n\t\tself.part2_matrix = np.zeros([self.W, self.H, self.window, self.window])\n\t\tself.miu_matrix = np.zeros([self.W, self.H, self.window])\n\n\tdef __get_laplacian(self):\n\t\trow = np.array([])\n\t\tcol = np.array([])\n\t\tdata = np.array([])\n\t\twindow_size = self.window * 2 + 1\n\t\tw_k = window_size ** 2\n\t\tU = self.epsilon / w_k * np.eye(3)\n\t\tindexs = np.arange(self.N).reshape(self.W, self.H)\n\t\twith tqdm(total=self.W - 2 * self.window, desc=\"Calculating L\", unit=\"it\", file=sys.stdout) as pbar:\n\t\t\tfor i in range(self.W - 2 * self.window):\n\t\t\t\tfor j in range(self.H - 2 * self.window):\n\t\t\t\t\tim_window = self.im[i:i + window_size, j:j + window_size, :]\n\t\t\t\t\twindow = np.array([im_window[:, :, 0].reshape(w_k),\n\t\t\t\t\t\t\t\t im_window[:, :, 1].reshape(w_k),\n\t\t\t\t\t\t\t\t im_window[:, :, 2].reshape(w_k)])\n\t\t\t\t\tmiu_k = np.mean(window, axis=1)\n\t\t\t\t\tdiff = window - np.tile(miu_k, 9).reshape(9, 3).T\n\t\t\t\t\tcov = (np.dot(diff, diff.T) / w_k) + U\n\t\t\t\t\tL_elem = np.eye(w_k) - (1 + np.dot(np.dot(diff.T, np.linalg.inv(cov)), diff)) / w_k\n\t\t\t\t\tx = indexs[i:i + window_size, j:j + window_size].flatten()\n\t\t\t\t\tx = np.tile(x, 9).reshape(9, 9)\n\t\t\t\t\ty = x.T\n\t\t\t\t\trow = 
np.append(row, x.flatten())\n\t\t\t\t\tcol = np.append(col, y.flatten())\n\t\t\t\t\tdata = np.append(data, L_elem.flatten())\n\t\t\t\tpbar.update()\n\t\tL = csc_matrix((data, (row, col)), shape=(self.N, self.N))\n\t\treturn L\n\n\tdef get_t(self):\n\t\tL = self.__get_laplacian().todense()\n\t\t# L = csc_matrix((self.N, self.N))\n\t\tt_reshaped = self.t.reshape(1, self.N)\n\t\tT = self.lamb * np.dot(t_reshaped, np.linalg.inv(L + self.lamb * np.eye(self.N)))\n\t\tT = T.reshape(self.W, self.H)\n\t\treturn T\n\n\n","repo_name":"JoycexxZ/ImageDefogging","sub_path":"soft_matting.py","file_name":"soft_matting.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28527525697","text":"import telebot\nfrom time import sleep\n\n\nBOT_TOKEN = '5019796180:AAHS0Ctq0_IHFdpr4_0eUya6gKKz-OvUMcY'\nCHANNEL_NAME = '@wash_notify'\n\nbot = telebot.TeleBot(BOT_TOKEN)\n\ndef send_message(instance):\n slot = instance.slot.time_range\n code = instance.code\n\n msg = f'Новая запись на {slot}! \\nКод записи - {code}'\n bot.send_message(CHANNEL_NAME, msg)\n sleep(1)\n","repo_name":"ftz-team/pony18974-back","sub_path":"server/core/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14990251253","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 07:54:27 2020\n\n@author: DELL\n\"\"\"\nfrom typing import BinaryIO\nimport numpy as np\nimport methods\nfrom sklearn.svm import SVC\nfrom data.Vertebral_column import load_data\nfrom sklearn.metrics import classification_report\n\ndef fit(X, y, M = 10, C = None , instance_categorization = False, proposed = False, theta=None):\n '''\n Input:\n X: data\n y: label\n M: Adaboost loops\n instance_categorization is boolean which means use or not use instance categorization\n Output H is a models of adaboosts , which is sign func of sum of M loops SVM\n '''\n #Xac dinh number of data va length of feature\n N, d = X.shape\n # initial weight adjustment and instance categorization\n #W_ada = methods.intinitialization_weight_adjustment(X, y, proposed)\n W_ada = methods.intinitialization_weight_adjustment(X, y, proposed, theta)\n # W_ada = methods.intinitialization_weight_adjustment(N)\n #Creat list of each models svm after adaboost\n w = []\n b = []\n #creat list of cofident\n alpha = []\n if instance_categorization is True:\n C_ada = methods.intinitialization_instance_categorization(N)\n for i in range(M):\n # Creat model\n WC = W_ada * C_ada\n clf = SVC.fit(kernel= 'linear', C = 10000, class_weight = WC) \n wi = clf.coef_\n bi = clf.intercept_[0]\n # Append wi and bi to the list\n w.append(wi)\n b.append(bi)\n #predict the model\n pred_i = methods.predict_svm(X, wi, bi)\n # Find true, false index after training svm\n true_index, false_index,false_index_P,false_index_N = methods.find_true_false_index(y, pred_i)\n # Compute i-th confident and append to the alpha\n alpha_i = methods.confident(W_ada,false_index,false_index_P,false_index_N)\n alpha.append(alpha_i)\n # Update weight adjustment and instance categorization\n W_ada = methods.update_weight_adjustment(W_ada, alpha_i,true_index, false_index)\n C_ada = methods.update_instance_categorization(X, y, wi, bi)\n else:\n for i in range(M):\n # Creat model\n \n clf = SVC(kernel= 'linear', C = 10000)\n clf.fit(X, y, W_ada)\n wi = clf.coef_.flatten()\n bi = clf.intercept_[0]\n # Append wi and bi to the list 
\n w.append(wi)\n b.append(bi)\n # Predict the model \n pred_i = methods.predict_svm(X, wi, bi)\n # Find true, false index after training svm\n true_index, false_index, false_index_P,false_index_N = methods.find_true_false_index(y, pred_i)\n # Compute i_th confident and append to the alpha\n alpha_i = methods.confident(W_ada,false_index,false_index_P,false_index_N)\n alpha.append(alpha_i)\n # Update weight adjustment\n W_ada = methods.update_weight_adjustment(W_ada, alpha_i,true_index,false_index)\n return w, b, alpha \n\ndef predict(X, w, b, alpha,M =10 ):\n H = np.zeros(X.shape[0])\n for i in range (M):\n H += alpha[i]*(X.dot(w[i]) +b[i])\n return np.sign(H)\n\n# X_train, y_train,X_test, y_test = load_data()\n# w, b, alpha = fit(X_train, y_train)\n# pred_y = predict(X_test, w, b, alpha)\n# print(classification_report(y_test, pred_y))\n\n\n","repo_name":"HaiYen-IT/ImAdaBoost","sub_path":"madaboost/adaboost_svm.py","file_name":"adaboost_svm.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19654321378","text":"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport numpy as np\nimport torch\n\nfrom jina.executors.decorators import batching, as_ndarray\nfrom jina.executors.encoders.frameworks import BaseTorchEncoder\nfrom jina.executors.devices import TorchDevice\n\n\nclass CLIPTextEncoder(BaseTorchEncoder):\n \"\"\"Encode ``Document`` content from a `np.ndarray` (of strings) of length `BatchSize` into\n a `np.ndarray` of shape `Batchsize x EmbeddingDimension`. \n\n Internally, :class:`CLIPImageEncoder` wraps the `CLIP` model from `https://github.com/openai/CLIP`.\n\n :param model_name: The name of the model. Supported models include ``ViT-B/32`` and ``RN50``.\n :param args: Additional positional arguments.\n :param kwargs: Additional positional arguments.\n \"\"\"\n\n def __init__(self, model_name: str = 'ViT-B/32',\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.model_name = model_name\n\n def post_init(self):\n \"\"\"Load a model from clip specified in `model_name`. 
\"\"\"\n import clip\n model, _ = clip.load(self.model_name, self.device)\n self.model = model\n\n @batching\n @as_ndarray\n def encode(self, content: 'np.ndarray', *args, **kwargs) -> 'np.ndarray':\n \"\"\"Transform a `np.ndarray` of strings of length `BatchSize` into\n a `np.ndarray` of shape `Batchsize x EmbeddingDimension`.\n\n :param content: A `np.ndarray` of strings.\n :param args: Additional positional arguments.\n :param kwargs: Additional positional arguments.\n :return: A `BachSize x EmbeddingSize` numpy `ndarray`.\n \"\"\"\n import clip\n input_torchtensor = clip.tokenize(content)\n if self.on_gpu:\n input_torchtensor = input_torchtensor.cuda()\n\n with torch.no_grad():\n embedded_data = self.model.encode_text(input_torchtensor)\n\n embedded_data = embedded_data.cpu().numpy()\n return embedded_data\n","repo_name":"jina-ai/jina-hub","sub_path":"encoders/nlp/CLIPTextEncoder/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"2"} +{"seq_id":"3472139315","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # 指定GPU\nfrom dataloader import getloader\nfrom train import train\nfrom model import LanguageEmbedding\nfrom config import *\ndef set_seed(seed):\n torch.set_default_tensor_type('torch.FloatTensor') \n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed) \n torch.set_default_tensor_type('torch.cuda.FloatTensor') \n torch.backends.cudnn.deterministic = True \n torch.backends.cudnn.benchmark = False \n\nif __name__ == '__main__':\n args = get_args()\n set_seed(args.seed)\n batch_size = args.batch_size\n train_loader = getloader(mode='train', batch_size=batch_size, shuffle=True)\n print('Training data loaded!')\n valid_loader = getloader(mode='valid', batch_size=batch_size, shuffle=False)\n print('Validation data loaded!')\n print('Finish loading the data....')\n\n # torch.autograd.set_detect_anomaly(True)\n model = LanguageEmbedding()\n train_losses, train_acces, eval_acces, eval_losses, best_f1=train(args,train_dataloader=train_loader,valid_dataloader=valid_loader,model=model)\n print(f'best f1-score on dev set:{best_f1}')","repo_name":"chen-kezhou/FAD-text","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74305941165","text":"# coding=UTF-8\r\nfrom django.conf.urls import url\r\nfrom . 
import views\r\n\r\n#视图函数命名空间\r\napp_name = 'blog'\r\nurlpatterns = [\r\n\turl(r'^$', views.IndexViews.as_view(), name='index'),\r\n\t#(?P[0-9]+)命名捕获组,从用户访问的URL里把括号内匹配的字符串捕获并作为关键字参数传给其对应的views函数detail\r\n\t#例如用户访问past/255/,事实上detail调用时是这样:detail(request, pk=255)\r\n\t#这就是在URL中捕获文章的id\r\n\turl(r'^posts/$', views.PostsViews.as_view(), name='posts'),\r\n\turl(r'^about/$', views.about, name='about'),\r\n\turl(r'^contact/$', views.contact, name='contact'),\r\n\turl(r'^post/(?P[0-9]+)/$', views.PostDetailView.as_view(), name='detail'),\r\n\t#括号括起来的是命名组参数,django会从用户访问的URL中提取这两个参数的值,然后传递给其对应的视图函数。\r\n\turl(r'^archives/(?P[0-9]{4})/(?P[0-9]{1,2})/$', views.ArchivesViews.as_view(), name='archives'),\r\n\turl(r'^category/(?P[0-9]+)/$', views.CategoryViews.as_view(), name='category'),\r\n\turl(r'^author/(?P[0-9]+)/$', views.author, name='author'),\r\n\turl(r'^tag/(?P[0-9]+)/$', views.tag, name='tag')\r\n]\r\n","repo_name":"derogg1991/My_Blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23495038642","text":"# -*- coding: UTF-8 -*-\n# ------------------------------------------------------------------------------\n# Filename: uncovr.py\n# Author: huangbin@pset.suntec.net\n# Date: 2019.12.11\n# ------------------------------------------------------------------------------\nimport os\nimport sys\nimport re\nimport time\nimport argparse\nimport zipfile\nimport pandas as pd\nimport numpy as np\nimport xml.dom.minidom as minidom\nfrom suntest.config import settings\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n\nfrom itertools import groupby\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef zip_files(filename_list, zip_name):\n zip = zipfile.ZipFile( zip_name, 'w', zipfile.ZIP_DEFLATED )\n for filename in filename_list:\n logger.debug('压缩文件%s' % filename)\n zip.write(filename)\n\n zip.close()\n logger.info('压缩文件完成。')\n\n\ndef get_groupby_num_list(num_list):\n groupby_num_list = list()\n\n fun = lambda x: x[1]-x[0]\n for k, g in groupby(enumerate(num_list), fun):\n l1 = [j for i, j in g]\n groupby_num_list.append(l1)\n\n return groupby_num_list\n\n\ndef read_source_file(source_file):\n contents = str()\n\n try:\n with open(source_file, \"ro\") as fb:\n contents = fb.readlines()\n except Exception as e:\n print(e)\n finally:\n return contents\n\n\ndef get_hit_line_source_code(hit_lines, contents, source_file):\n hit_lines = sorted(hit_lines)\n if hit_lines[-1] > len(contents):\n logger.error(\"统计行数%s已经超过了源文件%s总行数%d.\" % (hit_lines[-1], source_file, len(contents)))\n return str()\n hit_indexs = [int(hit_line) - 1 for hit_line in hit_lines]\n\n try:\n for hit_index in hit_indexs:\n if \"<<<\" in contents[hit_index]:\n continue\n contents[hit_index] = \"<<< \" + contents[hit_index]\n except Exception as e:\n print(e)\n\n hit_min_index = hit_indexs[0]\n hit_max_index = hit_indexs[len(hit_indexs) - 1]\n source_code = str('\\n'.join(contents[(hit_min_index - 5):(hit_max_index + 5)]))\n\n return source_code\n\n\ndef parse_coverage_xml_report(coverage_report):\n logger.info(\"开始解析覆盖率报告%s.\" % coverage_report)\n condition_coverage_hit_record = dict()\n condition_coverage_data_item = tuple()\n line_coverage_hit_record = dict()\n line_coverage_data_item = tuple()\n condition_coverage_data_list = list()\n line_coverage_data_list = list()\n condition_coverage_data_head = [\"source_file\", 
\"source_code\", \"condition_coverage\", \"line_number\"]\n line_coverage_data_head = [\"source_file\", \"source_code\", \"line_number\"]\n condition_coverage_df = pd.DataFrame()\n line_coverage_df = pd.DataFrame()\n\n if not os.path.exists(coverage_report):\n logger.error(\"覆盖率报告%s不存在。\" % coverage_report)\n return (condition_coverage_df, line_coverage_df)\n\n try:\n tree = ET.parse(coverage_report)\n root = tree.getroot()\n sources = root.find(\"sources\")\n source = sources.find(\"source\")\n repository = source.text\n packages = root.find(\"packages\")\n for package in packages.findall(\"package\"):\n classes = package.find(\"classes\")\n for class_element in classes.findall(\"class\"):\n filename = class_element.get(\"filename\")\n lines = class_element.find(\"lines\")\n for line in lines.findall(\"line\"):\n source_file = source_file_rmI = os.path.join(repository, filename)\n # bug of jacoco report xml\n if re.search(r'I*.java', os.path.basename(source_file)):\n source_file_rmI = os.path.join(os.path.dirname(source_file), os.path.basename(source_file)[1:])\n if not os.path.exists(source_file_rmI) and not os.path.exists(source_file):\n logger.error(\"源代码文件%s不存在。\" % source_file)\n continue\n if line.get(\"branch\") == \"true\":\n condition_coverage = line.get(\"condition-coverage\")\n line_number = int(line.get(\"number\"))\n if condition_coverage.find(\"100%\"):\n contents = read_source_file(source_file)\n source_code = get_hit_line_source_code([line_number], contents, source_file)\n condition_coverage_data_item = (source_file, source_code, condition_coverage, line_number)\n condition_coverage_data_list.append(condition_coverage_data_item)\n if line.get(\"hits\") == \"0\":\n line_number = int(line.get(\"number\"))\n if source_file not in line_coverage_hit_record.keys():\n line_coverage_hit_record[source_file] = list()\n line_coverage_hit_record[source_file].append(line_number)\n except Exception as e:\n logger.exception(\"解析覆盖率报告发生异常%s\" % e)\n return (condition_coverage_df, line_coverage_df)\n\n for source_file,hit_lines in line_coverage_hit_record.items():\n groupby_hit_lines = get_groupby_num_list(hit_lines)\n contents = read_source_file(source_file)\n for lines in groupby_hit_lines:\n source_code = get_hit_line_source_code(lines, contents, source_file)\n line_coverage_data_item = (source_file, source_code, lines)\n line_coverage_data_list.append(line_coverage_data_item)\n\n if condition_coverage_data_list:\n condition_coverage_df = pd.DataFrame.from_records(condition_coverage_data_list, columns=condition_coverage_data_head)\n else:\n logger.error(\"未覆盖条件覆盖率数据为空.\")\n\n if line_coverage_data_list:\n line_coverage_df = pd.DataFrame.from_records(line_coverage_data_list, columns=line_coverage_data_head)\n else:\n logger.error(\"未覆盖行覆盖率数据为空.\")\n\n return (condition_coverage_df, line_coverage_df)\n\n\ndef generate_coverage_xls_report(report, condition_coverage_df, line_coverage_df):\n writer = pd.ExcelWriter(report, engine='xlsxwriter')\n condition_coverage_df.to_excel(writer, sheet_name='Condition', index=False)\n line_coverage_df.to_excel(writer, sheet_name='Line', index=False)\n\n workbook = writer.book\n head_format = workbook.add_format({'align': 'center', 'bold': 1})\n source_file_format = workbook.add_format({'align': 'left', 'valign': 'vcenter', 'bold': 0})\n source_code_format = workbook.add_format({'align': 'left', 'text_wrap': 1 })\n condition_coverage_format = line_number_format = workbook.add_format({'align': 'center', 'valign': 'vcenter', 'text_wrap' : 1, 'bold': 0})\n\n 
worksheet = writer.sheets['Condition']\n\n # source_file\n worksheet.set_column('A:A', 70, condition_coverage_format)\n # source_code\n worksheet.set_column('B:B', 100, source_code_format)\n # condition_coverage\n worksheet.set_column('C:C', 20, condition_coverage_format)\n # line_number\n worksheet.set_column('D:D', 14, line_number_format)\n\n\n worksheet = writer.sheets['Line']\n\n # source_file\n worksheet.set_column('A:A', 70, source_file_format)\n # source_code\n worksheet.set_column('B:B', 100, source_code_format)\n # line_number\n worksheet.set_column('C:C', 14, line_number_format)\n\n try:\n default_encoding = sys.getdefaultencoding()\n if default_encoding != settings.encode:\n reload(sys)\n sys.setdefaultencoding(settings.encode)\n writer.save()\n except UnicodeDecodeError as e:\n logger.exception(e)\n finally:\n reload(sys)\n sys.setdefaultencoding(default_encoding)\n\n\ndef capture_uncovered_code(coverage_xml_report, uncovr_report):\n logger.info(\"开始生成未覆盖代码抽出报告。\")\n (condition_coverage_df, line_coverage_df) = parse_coverage_xml_report(coverage_xml_report)\n generate_coverage_xls_report(uncovr_report, condition_coverage_df, line_coverage_df)\n\n return True\n","repo_name":"berniehuang/autotest","sub_path":"suntest/suntest_unittest/report/uncovr.py","file_name":"uncovr.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5536254405","text":"import tkinter\r\nimport random\r\n\r\n# Initiliaze the main window\r\nroot = tkinter.Tk()\r\nroot.geometry('800x500')\r\nroot.title('Just Rolling the Dice')\r\n\r\n# A label which displays the dice\r\nlabel = tkinter.Label(root, text='', font=('Courier', 260))\r\n\r\n# This function is activated whenever the button is pressed\r\ndef roll_dice():\r\n # unicode character strings for the dice\r\n dice = ['\\u2680', '\\u2681', '\\u2682', '\\u2683', '\\u2684', '\\u2685'] # These are the unicodes\r\n label.configure(text=f'{random.choice(dice)} {random.choice(dice)}') # Display all the photos of dice randomly\r\n label.pack()\r\n\r\n# button\r\nbutton = tkinter.Button(root, text='roll dice', foreground='red', command=roll_dice) # Creating a button\r\n\r\n\r\nbutton.pack() # Keep the Button in the main Window\r\n\r\n# call the mainloop of Tk\r\n# keeps window open i.e the program running\r\nroot.mainloop()\r\n","repo_name":"SauravBhosale/DiceSimulator_using_python","sub_path":"Dice.py","file_name":"Dice.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15562877013","text":"from scipy.constants import c\nimport numpy as np\n# Import the relevant structures in FBPIC\nfrom fbpic.main import Simulation\nfrom fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic\nfrom fbpic.lpa_utils.bunch import add_elec_bunch_gaussian\n\n# Set the seed since the gaussian is drawn randomly\nnp.random.seed(0)\n\n# The simulation box\nNz = 400 # Number of gridpoints along z\nzmax = 0.e-6 # Length of the box along z (meters)\nzmin = -40.e-6\nNr = 100 # Number of gridpoints along r\nrmax = 100.e-6 # Length of the box along r (meters)\nNm = 2 # Number of modes used\nn_order = 32 # Order of the stencil\n\n# Bunch parameters\nsig_r = 3.e-6\nsig_z = 3.e-6\nn_emit = 1.e-6\ngamma0 = 15.\nsig_gamma = 1.\nQ = 10.e-12\nN = 100000\ntf = 0\nzf = -20.e-6\n\n# The simulation timestep\ndt = (zmax-zmin)/Nz/c # Timestep (seconds)\n\n# Initialize the simulation object\nsim = 
Simulation( Nz, zmax, Nr, rmax, Nm, dt,\n 0, 0, 0, 0, 2, 2, 4, 0., zmin=zmin,\n n_order=n_order, boundaries={'z':'open', 'r':'reflective'} )\n# Configure the moving window\nsim.set_moving_window( v=c )\n# Suppress the particles that were intialized by default and add the bunch\nsim.ptcl = [ ]\nelec = add_elec_bunch_gaussian( sim, sig_r, sig_z, n_emit, gamma0, sig_gamma,\n Q, N, tf, zf, symmetrize=True )\n# Set the diagnostics\nsim.diags = [ FieldDiagnostic(10, sim.fld, comm=sim.comm),\n ParticleDiagnostic(10, species={'elec':elec}, comm=sim.comm) ]\n# Perform one simulation step (essentially in order to write the diags)\nsim.step(1)\n","repo_name":"fbpic/fbpic","sub_path":"tests/unautomated/test_space_charge_gaussian.py","file_name":"test_space_charge_gaussian.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"2"} +{"seq_id":"9655037235","text":"import discord\nimport datetime\nimport pytz\nimport logging\nimport deepl\nimport pysaucenao\nimport deep_translator\nimport requests\nimport aiohttp\nimport genshin\nimport orjson\nimport asyncio\nimport platform\nfrom discord.commands import SlashCommandGroup\nfrom discord.ext import commands\n\nsingapore_tz = pytz.timezone(\"Asia/Singapore\")\nstart_time = datetime.datetime.now(singapore_tz)\n\nclass UtilityMenu(commands.Cog):\n def __init__(self, bot, version):\n self.bot = bot\n self.version = version\n \n utility = SlashCommandGroup(\"utility\", \"Various commands to get more information on Gura!\")\n \n @utility.command(name=\"ping\", description=\"Gura's Latency\")\n async def ping(self, ctx):\n await ctx.respond(\n embed=discord.Embed(\n description=f\"Ping: {self.bot.latency*1000:.2f}ms\", color=discord.Color.purple()\n ),\n ephemeral=True,\n )\n \n @utility.command(name=\"about\", description=\"Returns information about Gura🦈\")\n async def about(self, ctx):\n text_channel = 0\n # voice_channel = 0\n # stage_channel = 0\n\n for channel in self.bot.get_all_channels():\n if isinstance(channel, discord.TextChannel):\n text_channel += 1\n # elif isinstance(channel, discord.VoiceChannel):\n # voice_channel += 1\n # elif isinstance(channel, discord.StageChannel):\n # stage_channel += 1\n \"\"\"Displays the bot uptime\"\"\"\n delta_uptime = datetime.datetime.now(singapore_tz) - start_time\n days, seconds = delta_uptime.days, delta_uptime.seconds\n hours = seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60\n uptime = f\"{hours:02d}:{minutes:02d}:{seconds:02d} {start_time.strftime('%d/%m/%Y')} ({days} days)\"\n\n embed = discord.Embed(\n title=\"About Gawr Gura🦈\",\n description=\"This is the information tab which shows you stats about Gura\",\n color=discord.Color.og_blurple(),\n )\n embed.set_thumbnail(\n url=\"https://img.guildedcdn.com/MediaChannelUpload/4b997b24da52431951d461c877f32435-Full.png\"\n )\n embed.set_author(\n name=\"Kurokami\",\n url=\"https://github.com/k3rokami\",\n icon_url=\"https://avatars.githubusercontent.com/u/50738510\",\n )\n embed.add_field(\n name=\"<:Stats:1091947763337539658> Statistics\",\n value=f\"\\n<:Servers:1091947846078566563> Servers : `{len(self.bot.guilds)}`\"\n f\"\\n<:Users:1091947828462506044> Users : `{len(self.bot.users)}`\"\n f\"\\n<:Text_Channel:1091947813249765416> Text channels : `{text_channel}`\"\n # f\"\\n<:Voice_Channel:1091947799710543884> Voice channels : `{voice_channel}`\"\n # f\"\\n<:Stage_Channel:1091947859156422816> Stage channels : `{stage_channel}`\"\n 
f\"\\n<:Commands:1091947872670470216> Commands : `{len(self.bot.commands)+len(self.bot.application_commands)}`\"\n f\"\\n<:GawrGuraHeart:1092448958816722995> Gawr Gura's Version : `{self.version.capitalize()}`\"\n f\"\\n<:Uptime:1091955760969293864> Uptime : `{uptime}`\",\n inline=False,\n )\n # embed.add_field(\n # name=\"Gura\",\n # value=f\"`Guilds {len(bot.guilds)}`\\n`User Count {len(bot.users)}`\\n`Build Version {VERSION.capitalize()}`\\n`Up since {uptime}`\",\n # inline=False,\n # )\n embed.add_field(\n name=\"Tools\",\n value = f\"`Python {platform.python_version()}`\\n`OS {(platform.system() + ' ' + platform.release())}`\"\n )\n embed.add_field(\n name=\"Main Modules\",\n value=f\"`Py-Cord {'.'.join(discord.__version__.split('.')[0:3])}`\\n`PySauceNao {pysaucenao.__version__}`\\n`DeepL {deepl.__version__}`\\n`Deep Translator {deep_translator.__version__}`\\n`Genshin {genshin.__version__}`\",\n )\n embed.add_field(\n name=\"External Modules\",\n value=f\"`Logging {logging.__version__}`\\n`PyTz {pytz.__version__}`\\n`Request {requests.__version__}`\\n`OrJson {orjson.__version__}`\\n`AioHttp {aiohttp.__version__}`\",\n )\n embed.add_field(\n name=\"Author\",\n value=\"`Kurokami`\\n[Github | Kurokami](https://github.com/k3rokami)\",\n )\n embed.add_field(\n name=\"Github Repository\",\n value=\"Found an issue? Let me know and create an entry on the repo!\\n[Gawr Gura🦈](https://github.com/k3rokami/Gura-Bot) is under the [MIT](https://github.com/k3rokami/Gura-Bot/blob/main/LICENSE) license\",\n inline=False,\n )\n # embed.set_footer(\n # text=f\"Requested by {ctx.interaction.user.name}\",\n # icon_url=ctx.interaction.user.display_avatar.url,\n # )\n await ctx.response.send_message(embed=embed, ephemeral=False)\n\n\n @utility.command(name=\"delete-messages\", description=\"Delete messages sent by the bot in DM channel.\")\n async def delete_messages(self, ctx, count: int = 1):\n # Check if the command was executed in a DM channel\n if ctx.channel.type in (discord.ChannelType.private, discord.ChannelType.group):\n # Get the bot's messages in the DM channel\n bot_messages = [\n msg\n for msg in await ctx.channel.history(limit=None).flatten()\n if msg.author == self.bot.user or isinstance(msg, discord.Message) and msg.embeds and msg.embeds[0].author and msg.embeds[0].author.name == self.bot.user.name\n ]\n # Delete the specified number of messages, defaulting to 1 if no count was provided\n count = min(count, len(bot_messages))\n if count == 0:\n await ctx.respond(\"No bot messages to delete.\", ephemeral=True)\n return\n for message in bot_messages[:count]:\n await message.delete()\n # Respond to the user to confirm that the messages were deleted\n await ctx.respond(f\"Deleted {count} bot message(s).\", ephemeral=True)\n else:\n await ctx.respond(\"This command can only be used in DMs.\", ephemeral=True)\n\n @utility.command(name=\"user\", description=\"Get member's Information\")\n async def user(self, interaction: discord.Interaction, member: discord.Member = None):\n if interaction.guild is None:\n embed = discord.Embed(description=interaction.user.mention, color=discord.Color.blue())\n embed.set_author(name=str(interaction.user), icon_url=interaction.user.avatar.url)\n embed.set_thumbnail(url=interaction.user.avatar.url)\n embed.add_field(\n name=\"**Registered**\", value=f\"{interaction.user.created_at.strftime('%a, %d %b %Y %I:%M %p')}\"\n )\n embed.set_footer(text=\"ID: \" + str(interaction.user.id))\n await interaction.response.send_message(embed=embed)\n else:\n if member is 
None:\n member = interaction.user\n date_format = \"%a, %d %b %Y %I:%M %p\"\n embed = discord.Embed(description=member.mention, color=discord.Color.blue())\n embed.set_author(name=str(member), icon_url=member.avatar.url)\n embed.set_thumbnail(url=member.avatar.url)\n embed.add_field(\n name=\"**Joined**\", value=f\"{member.joined_at.strftime(date_format)}\"\n )\n members = sorted(interaction.guild.members, key=lambda m: m.joined_at)\n embed.add_field(name=\"**Join position**\", value=f\"{str(members.index(member)+1)}\")\n embed.add_field(\n name=\"**Registered**\", value=f\"{member.created_at.strftime(date_format)}\"\n )\n if len(member.roles) > 1:\n role_string = \" \".join([r.mention for r in member.roles][1:])\n embed.add_field(\n name=f\"**Roles [{len(member.roles) - 1}]**\",\n value=f\"{role_string}\",\n inline=False,\n )\n perm_string = \", \".join(\n [str(p[0]).replace(\"_\", \" \").title() for p in member.guild_permissions if p[1]]\n )\n if perm_string:\n embed.add_field(name=\"**Guild permissions**\", value=f\"{perm_string}\", inline=False)\n embed.set_footer(text=\"ID: \" + str(member.id))\n await interaction.response.send_message(embed=embed)\n\n @utility.command(name=\"banner\", description=\"Get member's Banner\")\n async def banner(self, interaction: discord.Interaction, member: discord.Member = None):\n if member is None:\n member = interaction.user\n # check if user has a banner and fetch it\n try:\n user = await self.bot.fetch_user(member.id)\n banner_url = user.banner.url # The URL of the banner\n except:\n await interaction.response.send_message(\n \"> **The user doesn't have a banner.**\", ephemeral=True\n )\n # sending the banner\n userAvatar = member.avatar.url\n embed = discord.Embed(\n title=\"Banner Link\", url=banner_url, color=discord.Color.purple()\n )\n embed.set_author(name=member.name, icon_url=userAvatar)\n embed.set_image(url=banner_url)\n embed.set_footer(\n text=f\"requested by {interaction.user}\", icon_url=interaction.user.avatar.url\n )\n await interaction.response.send_message(embed=embed)\n\n\n @utility.command(name=\"avatar\", description=\"Get member's Avatar\")\n async def avatar(self, interaction: discord.Interaction, member: discord.Member = None):\n if not member:\n member = interaction.user\n\n user_avatar = member.avatar.url\n embed = discord.Embed(color=discord.Color.dark_gold())\n embed.title = \"**Profile Avatar Link**\"\n embed.url = user_avatar\n embed.set_author(name=member.name, icon_url=user_avatar)\n embed.set_image(url=user_avatar)\n embed.set_footer(\n text=f\"requested by {interaction.user}\", icon_url=interaction.user.avatar.url\n )\n\n display_avatar = member.display_avatar.url\n\n disavatar_button = discord.ui.Button(\n label=\"Server's Profile Avatar\", style=discord.ButtonStyle.green\n )\n\n async def show_disavatar(button_interaction: discord.Interaction):\n if button_interaction.user != interaction.user:\n return await button_interaction.response.send_message(\n \"This avatar is not for you!\", ephemeral=True\n )\n if not display_avatar or display_avatar == user_avatar:\n embed.title = \"**Profile Avatar Link**\"\n embed.url = display_avatar\n embed.description = \"**This user doesn't have a server avatar.**\"\n disavatar_button.style = discord.ButtonStyle.gray\n view = discord.ui.View()\n else:\n embed.title = \"**Server's Profile Avatar Link**\"\n embed.url = display_avatar\n embed.set_image(url=display_avatar)\n disavatar_button.style = discord.ButtonStyle.gray\n view = discord.ui.View()\n await 
button_interaction.message.edit(embed=embed, view=view)\n\n disavatar_button.callback = show_disavatar\n view = discord.ui.View(disavatar_button)\n await interaction.response.send_message(embed=embed, view=view)\n\n\n @utility.command(name=\"private_channel\", description=\"Makes a temporary private channel.\")\n async def prvchannel(self, interaction: discord.Interaction, time: str, channel_name: str):\n # Check if the command was used in a server\n if not interaction.guild:\n return await interaction.response.send_message(\n \"> This command can only be used in a server.\", ephemeral=True\n )\n\n guild = interaction.guild\n category = discord.utils.get(interaction.guild.categories)\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(read_messages=False),\n guild.me: discord.PermissionOverwrite(read_messages=True),\n }\n if time:\n get_time = {\n \"s\": 1,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 86400,\n \"w\": 604800,\n \"mo\": 2592000,\n \"y\": 31104000,\n }\n timer = time\n a = time[-1]\n b = get_time.get(a)\n c = time[:-1]\n try:\n int(c)\n except:\n return await interaction.response.send_message(\n \"> **Type time and time unit s=Seconds,m=Minutes,h=Hours,d=Days,w=Weeks,mo=Month,y=Years correctly**\",\n ephemeral=True,\n )\n try:\n sleep = int(b) * int(c)\n except:\n return await interaction.response.send_message(\n \"> **Type time and time unit s=Seconds,m=Minutes,h=Hours,d=Days,w=Weeks,mo=Month,y=Years correctly**\",\n ephemeral=True,\n )\n channel = await guild.create_text_channel(\n name=channel_name, overwrites=overwrites, category=category\n )\n embed = discord.Embed(\n title=\"Channel Created! ✅\",\n description=f\"Private Channel **{channel_name}** has been created for **{timer}**\",\n color=discord.Color.nitro_pink(),\n )\n await interaction.response.send_message(embed=embed, ephemeral=False)\n await asyncio.sleep(int(sleep))\n await channel.delete()\n embed = discord.Embed(\n title=\"Channel Deleted!\",\n description=f\"Private Channel **{channel_name}** has been deleted after **{timer}**\",\n color=discord.Color.nitro_pink(),\n )\n await interaction.followup.send(embed=embed, ephemeral=True)\n\n\n @utility.command(name=\"lock\", description=\"Locks a channel.\")\n async def lock(self, interaction: discord.Interaction, channel: discord.TextChannel = None):\n channel = channel or interaction.channel\n overwrite = channel.overwrites_for(interaction.guild.default_role)\n if overwrite.send_messages is False:\n return await interaction.response.send_message(\n \"> The channel is already locked\", ephemeral=True\n )\n overwrite.send_messages = False\n await channel.set_permissions(interaction.guild.default_role, overwrite=overwrite)\n embed = discord.Embed(\n title=\"🔒 ┃ Channel Locked!\",\n description=f\"**{channel.mention}** has been locked.\",\n color=discord.Color.brand_red(),\n )\n await interaction.response.send_message(embed=embed)\n\n\n @utility.command(name=\"unlock\", description=\"Unlocks a locked channel.\")\n async def unlock(self, interaction: discord.Interaction, channel: discord.TextChannel = None):\n channel = channel or interaction.channel\n overwrite = channel.overwrites_for(interaction.guild.default_role)\n if overwrite.send_messages is True:\n return await interaction.response.send_message(\n \"> The channel is already unlocked\", ephemeral=True\n )\n overwrite.send_messages = True\n await channel.set_permissions(interaction.guild.default_role, overwrite=overwrite)\n embed = discord.Embed(\n title=\"🔓 ┃ Channel Unlocked!\",\n 
description=f\"**{channel.mention}** has been unlocked.\",\n color=discord.Color.dark_teal(),\n )\n await interaction.response.send_message(embed=embed)\n \n @utility.command(name=\"pfp\", description=\"Change bot's pfp using an image URL\")\n @commands.is_owner()\n async def pfp(ctx, attachment_url=None):\n if attachment_url is None and not ctx.message.attachments:\n return await ctx.send(\n f\"Please provide an Image URL or attach an Image for this command\"\n )\n else:\n await ctx.send(\n f\"Profile picture changed successfully\"\n )\n if attachment_url is None:\n attachment_url = ctx.message.attachments[0].url\n\n async with aiohttp.ClientSession() as session:\n async with session.get(attachment_url) as response:\n await bot.user.edit(avatar=await response.read())\n \ndef setup(bot):\n bot.add_cog(UtilityMenu(bot))","repo_name":"k3rokami/Gura-Bot","sub_path":"cogs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":16602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"25488084028","text":"# 1. Напечатать максимум из трёх чисел\r\ndef max_3(a,b,c):\r\n return max(a,b,c)\r\nprint(max_3(8,10,3))\r\n\r\n# 2. Дан год (число), определить, является ли он високосным (делится на 4 и не делится на 100)\r\ndef is_vic(year):\r\n if year%4==0 and year%100!=0:\r\n return 'Yes'\r\n else:\r\n return 'No'\r\nprint(is_vic(2020))\r\nprint(is_vic(1990))\r\nprint(is_vic(2001))\r\n\r\n# 3. Даны длины двух катетов прямоугольного треугольника, посчитать гипотенузу (c=sqrt(a^2+b^2)\r\ndef gip(a,b):\r\n return (a**2+b**2)**(1/2)\r\nprint(gip(12,13))\r\n\r\n# 4. Проверить, что число является простым (делится без остатка только на 1 и на себя)\r\ndef simple_number(a):\r\n for i in range(2,a):\r\n if a%i==0:\r\n return 'not simple'\r\n return 'simple'\r\nprint(simple_number(13))\r\n\r\n# 5. Даны фамилия, имя и отчество. Записать их в сокращённом виде\r\ndef short_name(name,surname,family_name):\r\n result=surname+' '+name[0]+'. '+family_name[0]+'.'\r\n return result\r\nprint(short_name('Ivan','Ivanov','Ivanovich'))\r\n\r\n# 6. Проверить, что список упорядочен в одну из сторон\r\ndef is_sorted(data):\r\n ordered=sorted(data)\r\n if ordered==data:\r\n return True\r\n ordered.reverse() #либо ordered=reversed(ordered)\r\n if ordered==data:\r\n return True\r\n return False\r\ndata=[1,2,3]\r\nprint(is_sorted(data))\r\ndata.reverse()\r\nprint(is_sorted(data))\r\ndata[0]=0\r\nprint(is_sorted(data))\r\n\r\n# 7. Дан список, вернуть список, состоящий только из различных значений (порядок не имеет значения)\r\ndef distinct(data):\r\n elements=set()\r\n for value in data:\r\n elements.add(value)\r\n return list(elements)\r\nprint(distinct([1,2,2,3,3,3,3,3,3,33,3,4]))\r\n# можно сделать так:\r\ndef distinct(data):\r\n return list(set(data))\r\nprint(distinct([1,2,2,3,3,3,3,3,3,33,3,4]))\r\n\r\n# 8. Дана дата в формате \"12.04.2019\", требуется перевести в запись \"12 апреля 2019\"\r\nmonths = {\r\n 1: 'January',\r\n 2: 'February',\r\n 3: 'March',\r\n 4: 'April',\r\n 5: 'May',\r\n 6: 'June',\r\n 7: 'July',\r\n 8: 'August',\r\n 9: 'September',\r\n 10: 'October',\r\n 11: 'November',\r\n 12: 'December'\r\n}\r\ndef format_date(date):\r\n parts=date.split('.') #появляется список вида ['12','04','2019']\r\n changed_month=months.get(int(parts[1]))\r\n if changed_month is None:\r\n return 'Not found'\r\n else:\r\n return parts[0]+' '+changed_month+' '+parts[2]\r\nprint(format_date('27.12.2019'))\r\n\r\n# 9. 
Дан список имён и список телефонов, сформировать словарь вида { имя: телефон }\r\ndef form_map(names,phones):\r\n name_to_phone=dict()\r\n for i in range(len(names)):\r\n name_to_phone[names[i]]=phones[i]\r\n return name_to_phone\r\nnames=['Jane','Mary']\r\nphones=[1234,5678]\r\nprint(form_map(names,phones))\r\n\r\n# 10. Дан список чисел, посчитать количество различных значений в нём\r\ndef diff(data):\r\n return len(set(data))\r\nprint(diff([1,2,2,3,3,3,3,3,3,33,3,4]))","repo_name":"NSLyuma/brunoyam","sub_path":"lesson_5.py","file_name":"lesson_5.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14711072373","text":"from sws_pythoncommands import *\r\n\r\n\r\n\r\ntrk_current = RPR_GetSelectedTrack(0,0)\r\n\r\nif \"B| \" in getname(trk_current) or \"A| \" in getname(trk_current) or \"Root| \" in getname(trk_current) :\r\n\r\n from MReaPy_AB_Listening_finish import identify_tracks\r\n trk_a = identify_tracks()[0]\r\n trk_b = identify_tracks()[1]\r\n trk_root = identify_tracks()[2]\r\n\r\n if RPR_GetMediaTrackInfo_Value(trk_a, \"B_MAINSEND\") == 0:\r\n switch = 0\r\n RPR_SetMediaTrackInfo_Value(trk_a, \"B_MAINSEND\",1)\r\n\r\n else:\r\n switch = 1\r\n RPR_SetMediaTrackInfo_Value(trk_a,\"B_MAINSEND\", 0)\r\n\r\n\r\n RPR_SetMediaTrackInfo_Value(trk_b, \"B_MAINSEND\", switch)\r\n\r\n RPR_SetOnlyTrackSelected(trk_current)\r\n\r\n\r\n\r\n\r\nelse:\r\n\r\n input = UserInput(\"A/B Comparison\", \"Shall it be? y/n\", 1, \"y\")\r\n if input == \"y\" or input == \"Y\":\r\n\r\n\r\n trk_current_name = getname(trk_current)\r\n RPR_GetSetMediaTrackInfo_String(trk_current, \"P_NAME\", f\"Root| {trk_current_name}\", 1)\r\n\r\n inserttrackabove()\r\n trk_a= RPR_GetSelectedTrack(0, 0)\r\n\r\n RPR_SetTrackColor(trk_a, 21004543)\r\n RPR_GetSetMediaTrackInfo_String(trk_a, \"P_NAME\", f\"A| {trk_current_name}\", 1)\r\n\r\n inserttrackabove()\r\n trk_b= RPR_GetSelectedTrack(0, 0)\r\n\r\n RPR_SetTrackColor(trk_b, 21004543)\r\n RPR_GetSetMediaTrackInfo_String(trk_b, \"P_NAME\", f\"B| {trk_current_name}\", 1)\r\n\r\n\r\n\r\n RPR_SetTrackSelected(trk_a, 1)\r\n RPR_SetTrackSelected(trk_b, 1)\r\n RPR_SetTrackSelected(trk_current, 1)\r\n\r\n\r\n\r\n RPR_SetMediaTrackInfo_Value(trk_a, \"B_MAINSEND\", 0)\r\n RPR_SetMediaTrackInfo_Value(trk_b, \"B_MAINSEND\", 1)\r\n RPR_SetMediaTrackInfo_Value(trk_current, \"B_MAINSEND\", 0)\r\n RPR_CreateTrackSend(trk_current, trk_b)\r\n RPR_CreateTrackSend(trk_current, trk_a)\r\n\r\n RPR_SetOnlyTrackSelected(trk_current)\r\n\r\n\r\n\r\n else:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MarlonKr/MReaPy_A-B_Listening","sub_path":"MReaPy_AB_Listening_start.py","file_name":"MReaPy_AB_Listening_start.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25177265115","text":"import json\nimport urllib2\nimport lxml.html as lh\n\nfrom actstream.models import Action\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom framebuzz.apps.api.models import Video\n\n\ndef queryset_iterator(queryset, chunksize=1000):\n import gc\n pk = 0\n last_pk = queryset.order_by('-pk')[0].pk\n queryset = queryset.order_by('pk')\n if queryset.count() < 1:\n pass\n while pk < last_pk:\n for row in queryset.filter(pk__gt=pk)[:chunksize]:\n pk = row.pk\n yield row\n gc.collect()\n\n\ndef get_client_ip(request):\n if settings.DEBUG:\n return '75.13.90.154'\n else:\n x_forwarded_for = 
request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n\ndef errors_to_json(errors):\n return json.dumps(\n dict(\n (k, map(unicode, v))\n for (k, v) in errors.iteritems()\n )\n )\n\n\ndef get_share_count(service, url):\n count = 0\n url = settings.SHARE_COUNT_URLS[service] % url\n response = urllib2.urlopen(url)\n\n try:\n if service == 'google':\n doc = lh.parse(response)\n count_string = doc.xpath(\"//div[@id='aggregateCount']/text()\")\n parsed = str(count_string).lstrip(\"['\").rstrip(\"']\")\n count = int(parsed)\n elif service == 'facebook':\n buf = response.read()\n response_json = json.loads(buf)\n if len(response_json) > 0:\n count_response = response_json[0].get('share_count', 0)\n count = int(count_response)\n elif service == 'twitter':\n buf = response.read()\n response_json = json.loads(buf)\n count_response = response_json.get('count', 0)\n count = int(count_response)\n else:\n pass\n except:\n pass\n\n return count\n\n\ndef get_total_shares(path):\n final = 0\n domains = ['http://framebuzz.com', 'http://frame.bz']\n for domain in domains:\n for service, share_url in settings.SHARE_COUNT_URLS.items():\n full_url = '%s%s' % (domain, path)\n count = get_share_count(service, full_url)\n final = final + int(count)\n\n path_split = [str(s) for s in path.split('/')]\n if len(path_split) > 3:\n video_id = path_split[-2]\n video = Video.objects.get(slug=video_id)\n email_shares = Action.objects.filter(verb='shared',\n action_object_object_id=video.id)\n final = final + len(email_shares)\n return final\n\n\ndef get_pending_uploads(username):\n pending_uploads = Video.objects.exclude(\n Q(Q(fp_url=None) & Q(job_id=None))).filter(\n added_by__username=username,\n processing=True)\n return pending_uploads\n","repo_name":"droxey/framebuzz","sub_path":"framebuzz/apps/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"12845872022","text":"#!/usr/bin/env python3\nimport os\nimport matplotlib.pyplot as plt\nfrom utils import plot_train_loss, load_model\n\nmodel_list = os.listdir('models/')\n\nfor model_name in model_list:\n model = load_model(model_name)\n fig, ax = plt.subplots(figsize=(8,6))\n plot_train_loss(model, ax=ax)\n fig.savefig('plots/' + model_name.split('.pt')[0] + '_learning_curve.pdf')\n plt.close()","repo_name":"jonasvj/TFDE","sub_path":"plot_learning_curves.py","file_name":"plot_learning_curves.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74131561965","text":"import urllib3\nimport urllib\nimport json\nimport requests\nfrom .base_api import BaseApi\n\nhttp = urllib3.PoolManager()\n\n\nclass FileApi(BaseApi):\n def __init__(self, base_url):\n super().__init__(base_url)\n self.root_url = ''\n\n def upload(self, file_path):\n url = '/file/upload'\n params = {\n 'file': file_path,\n }\n return self._upload_multipart(url, params)\n\n \"\"\"\n :param file_name: 文件name\n :param store_path: 文件存储路径\n :return: 状态码\n \"\"\"\n def download(self, file_name, saved_file_path):\n url = '/file/download'\n\n res = self._download(url, file_name, saved_file_path)\n if res[0] == saved_file_path:\n return {\n 'result': 'done',\n 'saved_file_path': saved_file_path\n }\n else:\n return {\n 'result': 'failed',\n }\n\n \"\"\"\n 通过preview检测文件是否存在\n :param 
file_name: 文件name\n \"\"\"\n def isFileExist(self, file_name):\n url = '/file/fileExist'\n\n response = self._get_req(url + '/' + file_name, {})\n if response['result'] == 'ok':\n return response['msg']['data']\n else:\n return False","repo_name":"lingfliu/hir_crowd_abnormal_ex","sub_path":"api/file_api.py","file_name":"file_api.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28467051718","text":"from random import random, randint, shuffle\n\nclass Gene:\n\n def DNAString(self):\n s = \"\"\n for base in self.DNA:\n s = \"{0} {1}\".format(s, base)\n return s[1:]\n\n def Mutate(self, mutationMagnitude, mutationChancePerBase = 0.75):\n for i in range(0, len(self.DNA)):\n if random() < mutationChancePerBase:\n r = randint(self.min, self.max)\n self.DNA[i] = min(max(self.DNA[i] + (random() - 0.5) * r / 500 * mutationMagnitude, self.min), self.max)\n \n def __init__(self, trait, DNA, minLength, valueRange):\n self.trait = trait\n self.DNA = DNA\n self.minLength = minLength\n self.valueRange = valueRange\n self.min = self.valueRange[0]\n self.max = self.valueRange[1]\n","repo_name":"ConnorUllmann/Evolution","sub_path":"evolution+equation/gene.py","file_name":"gene.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32174468902","text":"# Importing required libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random as rd\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nclass PCAHomework:\n def pca_analysis(self, data):\n # Mean-Centering and Scaling Data\n # Scale Function Expects Samples to be Rows\n scaled_data = StandardScaler().fit_transform(data)\n\n # Computing Covariance Matrix\n cov_matrix = np.cov(scaled_data)\n\n print(\"Covariance Matrix: \")\n print(cov_matrix)\n\n # Performing Eigen-Decomposition\n eigenval, eigenvec = np.linalg.eig(cov_matrix)\n\n print(\"Eigenvalues: \")\n print(eigenval)\n print(\"Eigenvectors: \")\n print(eigenvec)\n\n # PCA Object\n pca = PCA()\n pca.fit(scaled_data)\n pca_data = pca.transform(scaled_data)\n\n # Projecting Data onto PC Axes\n project_data = pca.inverse_transform(pca_data)\n plt.scatter(project_data[:, 0], project_data[:, 1], label = 'Projected Data')\n plt.xlabel('PC1')\n plt.ylabel('PC2')\n plt.title('Projected Data onto PC Axes')\n plt.show()\n\n # Calculating Percent Variance\n per_var = np.round(pca.explained_variance_ratio_* 100, decimals = 1)\n labels = ['PC' + str(x) for x in range(1, len(per_var) + 1)]\n\n # Scree Plot\n plt.bar(x = range(1, len(per_var) + 1), height = per_var, tick_label = labels)\n plt.ylabel('Percentage of Explained Variance')\n plt.xlabel('Principal Component')\n plt.title('Scree Plot')\n plt.show()\n\n # Scores Plot\n fig, ax = plt.subplots()\n ax.scatter(pca_data[:, 0], pca_data[:, 1], color='blue')\n ax.set_title('Scores Plot')\n ax.set_xlabel('PC1')\n ax.set_ylabel('PC2')\n plt.show()\n\n # Loadings Plot\n loadings = pca.components_\n\n fig, ax = plt.subplots()\n ax.scatter(loadings[:, 0], loadings[:, 1], color='blue')\n ax.set_title('Loadings Plot')\n ax.set_xlabel('PC1')\n ax.set_ylabel('PC2')\n for i in range(loadings.shape[0]):\n ax.text(loadings[i, 0], loadings[i, 1], 'x' + str(i + 1))\n plt.show()\n\n# my_instance = PCAHomework()\n#\n# my_instance.pca_analysis('path to 
data')\n","repo_name":"speercolin/BINF6210","sub_path":"Homework_2/pca_analysis.py","file_name":"pca_analysis.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18012834865","text":"from fastapi import WebSocket\nfrom manager import Packet, Device\nfrom collections import defaultdict\nfrom message import easy_send\n\n\nimport register\n\n\ndef is_world(packet:Packet, word='world', key='unit'):\n entity_is = packet.message\n\n if packet.is_json():\n entity_is =packet.get_json().get(key, None)\n\n return (entity_is or '').lower() == word\n\n\nclass SceneManager(Device):\n \"\"\"The scene manager mounts the incoming \"device\" list to detect \"world\"\n packages.\n \"\"\"\n scenes = None\n\n def __init__(self):\n self.scenes = self.scenes or {}\n self.socket_scene_list = defaultdict(set)\n\n async def digest_packet(self, packet: Packet):\n \"\"\"Check if the scene sent this message.\n \"\"\"\n if is_world(packet):\n print('New Scene\\n')\n await self.stack_scene(packet)\n\n print('SceneManager, digest_packet')\n\n async def disconnect(self, websocket):\n print('SceneManager::disconnect')\n\n deleted = await self.drop_scene_owner(websocket.uuid)\n deleted_res = register.delete_scenes(*deleted)\n await self.host.broadcast_json(_exclude=(websocket,),\n event='scenes_disconnect',\n action='disconnect',\n type='scenes',\n deleted=tuple(x for x,y in deleted_res.items() if y is True)\n )\n\n async def stack_scene(self, packet: Packet):\n \"\"\"A new scene:\n {\"new_network\":1257,\"root\":1179,\"is\":\"World\"}\n \"\"\"\n # content = packet.as_json()\n ## int ID of the scene\n # iid = content.get('root')\n\n \"\"\"Convert the incoming JSON packet to an internal event model\n \"\"\"\n new_scene = packet.as_event('NewScene')\n # int ID of the scene\n iid = new_scene.root\n\n if iid in self.scenes:\n return await self.reconnect_scene(iid, packet)\n await self.connect_scene(iid, packet)\n #await self.announce_scene(iid, packet)\n\n async def reconnect_scene(self, iid, packet):\n print('Reattach existing scene')\n return await self.connect_scene(iid, packet,\n count=self.scenes[iid]['count']+1)\n\n # async def announce_scene(self, iid, packet):\n # msg_dict = packet.get_json()\n # print('Announcing', packet.message)\n # await self.host.broadcast(packet.message, exclude=(packet.owner,))\n\n async def connect_scene(self, iid, packet, **extra):\n \"\"\"Append the scene to the existing list of manages scenes.\n \"\"\"\n entry = packet.get_json()\n entry['count'] = 0\n entry['owner'] = tuple(packet.owner.client)\n entry['uuid'] = packet.owner.uuid\n entry.update(extra)\n\n await self.add_scene_owner(packet.owner.uuid, iid)\n self.scenes[iid] = entry\n register.add_scene(iid, entry)\n\n async def drop_scene_owner(self, uuid):\n deletes = set()\n if uuid in self.socket_scene_list:\n print('deleting scenes', uuid, self.socket_scene_list[uuid])\n deletes.update(self.socket_scene_list[uuid])\n del self.socket_scene_list[uuid]\n\n return tuple(deletes)\n\n async def add_scene_owner(self,uuid, root_id):\n print('add_scene_owner', uuid, root_id)\n self.socket_scene_list[uuid].add(root_id)\n\n","repo_name":"Strangemother/python-websocket-server","sub_path":"old/v1/scene/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14696863711","text":"def printPathHelper(x, y, maze, n, 
solution):\n\n #Destination Cell\n if x == n - 1 and y == n - 1:\n solution[x][y] = 1\n print(solution)\n \n return\n # these are the Blocking Points\n if x < 0 or y < 0 or x >=n or y >= n or maze[x][y] == 0 or solution[x][y] == 1:\n return\n solution[x][y] = 1\n #print(\"solution first wala\",solution)\n #down\n printPathHelper(x + 1, y, maze, n, solution)\n \n #right\n printPathHelper(x, y + 1, maze, n, solution)\n \n #top\n printPathHelper(x - 1, y, maze, n, solution)\n \n #left\n printPathHelper(x, y - 1, maze, n, solution)\n solution[x][y] = 0\n## print(\"solutionnn\", solution)\n## print(\"solution\", x, '==',y)\n return\n\ndef printPath(maze):\n n = len(maze)\n \n solution = [[0 for j in range(n)]for i in range(n)]\n #print(solution)\n printPathHelper(0, 0, maze, n, solution)\n\n\nn = int(input())\nmaze = []\nfor i in range(n):\n row = [int(ele) for ele in input().split()]\n maze.append(row)\nprintPath(maze)\n\n\n\n\n","repo_name":"nilankh/QuestionsOfCoding","sub_path":"ratinamaze.py","file_name":"ratinamaze.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17352009519","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\n\ndef nada():\n pass\n\ncv2.namedWindow(\"result\")\n\n# Create trackbars for color change\n# Hue is from 0-179 for Opencv\ncv2.createTrackbar('HMin', 'result', 0, 179, nada)\ncv2.createTrackbar('SMin', 'result', 0, 255, nada)\ncv2.createTrackbar('VMin', 'result', 0, 255, nada)\ncv2.createTrackbar('HMax', 'result', 0, 179, nada)\ncv2.createTrackbar('SMax', 'result', 0, 255, nada)\ncv2.createTrackbar('VMax', 'result', 0, 255, nada)\n\n# Set default value for Max HSV trackbars\ncv2.setTrackbarPos('HMax', 'result', 179)\ncv2.setTrackbarPos('SMax', 'result', 255)\ncv2.setTrackbarPos('VMax', 'result', 255)\n\n# Initialize HSV min/max values\nhMin = sMin = vMin = hMax = sMax = vMax = 0\nphMin = psMin = pvMin = phMax = psMax = pvMax = 0\n\n\n\nwhile True:\n # Get current positions of all trackbars\n hMin = cv2.getTrackbarPos('HMin', 'result')\n sMin = cv2.getTrackbarPos('SMin', 'result')\n vMin = cv2.getTrackbarPos('VMin', 'result')\n hMax = cv2.getTrackbarPos('HMax', 'result')\n sMax = cv2.getTrackbarPos('SMax', 'result')\n vMax = cv2.getTrackbarPos('VMax', 'result')\n\n\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n\n _, frame = cap.read()\n \n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv_frame, lower, upper)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n\n \n # cv2.imshow('src', frame)\n # cv2.imshow('mask', mask)\n cv2.imshow('result', result)\n\n \n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"B6Infinity/codesnippets","sub_path":"Python/OpenCV/HSV_RangeFinder.py","file_name":"HSV_RangeFinder.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"41308945079","text":"N = int(input())\nevents = []\nfor i in range(N):\n T,L = map(int, input().split())\n events.append((T,1))\n events.append((T+L,-1))\n \nevents.sort()\ngoodsin = 0\nmaxgoodsin = 0\nfor T, t in events:\n if t == 1:\n goodsin += 1\n else:\n goodsin -= 1\n maxgoodsin = max(maxgoodsin, 
goodsin)\n\nprint(maxgoodsin)\n","repo_name":"Librekiel/Examples","sub_path":"Tamozhnya.py","file_name":"Tamozhnya.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"28176787317","text":"from __future__ import annotations\nimport numpy as np\nimport zarr\n\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\n\ndef get_crop_rect(query_mask: np.ndarray, threshold=0) -> tuple[int]:\n '''crops a mask'''\n rows, cols = np.where(query_mask > threshold)\n top = np.min(rows)\n bot = np.max(rows) + 1\n left = np.min(cols)\n right = np.max(cols) + 1\n return top, left, bot, right\n\nclass SearchTool:\n '''Base class for searching across feature tensors. `compute()` is not implemented here, so\n it is recommended to use `CachedSearchTool` or `LiveSearchTool` instead.'''\n\n def __init__(self, model, device):\n self._model = model\n self._device = device\n\n def set_input_image(self, query_image: torch.Tensor):\n '''Assumes `query_image` is already preprocessed'''\n query_image = query_image.to(self._device)\n self._query_features = self._model(query_image[None, :, :, :]).to(self._device)\n\n def compute(self, query_mask):\n raise NotImplementedError('Do not use the SearchTool base class')\n\n def compute_batch(self, query_mask: np.ndarray, batch_arr: np.ndarray | torch.Tensor) -> tuple[torch.Tensor]:\n '''Computes cosine similarities for a single batch.'''\n top, left, bot, right = get_crop_rect(query_mask)\n cropped_mask = query_mask[top:bot, left:right]\n cropped_query_features = self._query_features[..., top:bot, left:right]\n\n # TODO: doing this once per batch is a potential bottleneck -- switch to doing it once\n mask_tensor = torch.tensor(cropped_mask).to(self._device)\n mask_tensor = mask_tensor[None, None, :, :] # reshape to match feature tensors\n\n region_query_features = cropped_query_features * mask_tensor\n norm_query_features = region_query_features \\\n / torch.linalg.vector_norm(region_query_features, dim=[1, 2, 3], keepdim=True)\n\n q_height = bot - top\n q_width = right - left\n\n width = batch_arr.shape[3]\n height = batch_arr.shape[2]\n\n if isinstance(batch_arr, np.ndarray):\n batch_vecs = torch.from_numpy(batch_arr)\n else:\n batch_vecs = batch_arr\n \n batch_vecs = batch_vecs.to(self._device)\n batch_sims = torch.zeros(len(batch_vecs)).to(self._device)\n batch_xs = torch.zeros(len(batch_vecs)).to(self._device)\n batch_ys = torch.zeros(len(batch_vecs)).to(self._device)\n\n # CONVOLUTION IDEAS\n # goal is to find cos(theta) = A . B / (||A|| * ||B||)\n # - first do convolution between batch_vecs (tensor) and norm_query_features*mask_tensor (kernel)\n # - batch_vecs is not normalized, so we need to find vector mag. 
for each window we used\n # - this can be done by first doing batch_vecs * batch_vecs (element-wise)\n # - then, we can do a second convolution between squared vecs and the mask tensor to get squared magnitude\n # - then just divide convolution outputs element-wise\n\n scaledSims = torch.conv2d(batch_vecs.double(), norm_query_features * mask_tensor)\n\n sq_batch_vecs = batch_vecs * batch_vecs\n sq_mask_tensor = mask_tensor * mask_tensor\n batch_mags = torch.conv2d(sq_batch_vecs.double().view(-1, 1, height, width), sq_mask_tensor)\n batch_mags = batch_mags.view(batch_vecs.shape[0], \n batch_vecs.shape[1],\n height - q_height + 1,\n width - q_width + 1)\n batch_mags = torch.sum(batch_mags, 1, keepdim=True)\n batch_mags = torch.sqrt(batch_mags) + 1e-5 # add small eps to avoid NaN values\n\n window_sims = scaledSims / batch_mags\n window_sims = window_sims.view(window_sims.shape[0], -1)\n\n batch_sims, idxs = window_sims.max(dim=1)\n batch_xs = idxs % (width - q_width + 1)\n batch_ys = torch.div(idxs, width - q_width + 1, rounding_mode='floor')\n\n return batch_sims, batch_xs, batch_ys\n\nclass LiveSearchTool(SearchTool):\n '''Implementation of `SearchTool` that computes features on the fly. \n Does not require a precomputed feature cache, but should only be used with\n small/medium datasets.'''\n def __init__(self, model, device, dataset: Dataset, batch_size=64):\n super().__init__(model, device)\n self._dataset = dataset\n # get all feature vectors from dataset\n self._all_vecs = self.__get_feature_vecs(batch_size)\n\n def __get_feature_vecs(self, batch_size):\n dl = DataLoader(self._dataset, batch_size)\n with torch.no_grad():\n it = iter(dl)\n all_vecs = []\n for batch in it:\n batch = batch.to(self._device)\n all_vecs.append(self._model(batch).cpu())\n del batch\n return all_vecs\n \n @torch.no_grad()\n def compute(self, query_mask):\n sims = []\n xs = []\n ys = []\n for batch in self._all_vecs:\n batch_sims, batch_xs, batch_ys = self.compute_batch(query_mask, batch)\n sims.append(batch_sims)\n xs.append(batch_xs)\n ys.append(batch_ys)\n return torch.cat(sims), torch.cat(xs), torch.cat(ys)\n\nclass CachedSearchTool(SearchTool):\n '''Implementation of `SearchTool` that uses a precomputed cache to efficiently \n compute search results. 
See `caching.py` for creating a new cache.''' \n def __init__(self, model, cache: zarr.Array, device, batch_size=500):\n super().__init__(model, device)\n self._cache = cache\n self._batch_size = batch_size\n\n @torch.no_grad()\n def compute(self, query_mask):\n sims = []\n xs = []\n ys = []\n for i in range(0, len(self._cache), self._batch_size):\n batch_arr = self._cache[i:i + self._batch_size]\n batch_sims, batch_xs, batch_ys = self.compute_batch(query_mask, batch_arr)\n sims.append(batch_sims)\n xs.append(batch_xs)\n ys.append(batch_ys)\n return torch.cat(sims), torch.cat(xs), torch.cat(ys)\n","repo_name":"lookingglasslab/VisualFeatureSearch","sub_path":"vissearch/searchtool.py","file_name":"searchtool.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"24"} +{"seq_id":"38425942872","text":"def gcd(a, b):\n if a == 0:\n return b\n\n if b == 0:\n return a\n\n if a == b:\n return a\n\n if a > b:\n return gcd(a-b, b)\n else:\n return gcd(a, b-a)\n\n\nwhile True:\n try:\n num1 = int(input(\"Enter number 1: \"))\n except ValueError as err:\n print(\"Please enter a number\")\n else:\n if num1 < 0:\n print(\"Enter a positive number\")\n else:\n break\n\nwhile True:\n try:\n num2 = int(input(\"Enter number 2: \"))\n except ValueError as err:\n print(\"Please enter a number\")\n else:\n if num1 < 0:\n print(\"Enter a positive number\")\n else:\n break\n\nans = gcd(num1, num2)\nprint(f\"HCF of {num1} and {num2} is {ans}\")","repo_name":"badri-s2001/GCD","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"24095774870","text":"import numpy as np\n\n#----------------------------------------------------\n#Min max scaling of individual angle vectors\n#Adjustment of angle vectors into a data matrix and a label matrix\n#---------------------------------------------------\n\n\n#Total number of videos, half violent and half nonviolent\nsize = 100\nhalfSize = int(size/2)\n\nvMat = np.load('AngleMatrices/vMat.npy')\nnvMat = np.load('AngleMatrices/nvMat.npy')\n\n#Data matrix definition\ndataMat = np.zeros((size,10,260))\n\n#LAbel matrix definition\nlabelMat = np.zeros((size,))\n\nfor i in range(halfSize):\n\ta = 2*i\n\tb = (2*i)+1\n\tdataMat[a]=nvMat[i]\n\tdataMat[b]=vMat[i]\n\tlabelMat[b]=1.0\nprint(dataMat.shape)\n\n\n#Min max scaling\nfor k in range(size):\n\tfor l in range(10):\n\t\tfor m in range(13):\n\t\t\tind1 = m*20\n\t\t\tind2 = (m+1)*20\n\t\t\tmaxim = 0\n\t\t\tminim = 10000\n\t\t\tfor n in range(ind1,ind2):\n\t\t\t\tif dataMat[k][l][n]>maxim:\n\t\t\t\t\tmaxim = dataMat[k][l][n]\n\t\t\t\tif dataMat[k][l][n]0:\n\t\t\t\t\tdataMat[k][l][n]=(dataMat[k][l][n]-minim)/dif\n\t\t\t\telse:\n\t\t\t\t\tdataMat[k][l][n]=0\n\nnp.save('TestMatrices/dataMat.npy',dataMat)\nnp.save('TestMatrices/labelMat.npy',labelMat)\n","repo_name":"jpowellgz/FightDetectionPoseLSTM","sub_path":"3_AngleVectorsAdjustement.py","file_name":"3_AngleVectorsAdjustement.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"41496446239","text":"import pytest\nfrom burdoc.elements import Bbox, LayoutElement\nfrom burdoc.utils.layout_graph import LayoutGraph\n\n@pytest.fixture\ndef page_bbox():\n return Bbox(0, 0, 200, 300, 200, 300)\n\n@pytest.fixture\ndef layout_elements(page_bbox):\n a = LayoutElement(Bbox(5, 5, 
95, 10, page_bbox.x1, page_bbox.y1))\n b = LayoutElement(Bbox(105, 5, 195, 20, page_bbox.x1, page_bbox.y1))\n c = LayoutElement(Bbox(30, 70, 87, 90, page_bbox.x1, page_bbox.y1))\n d = LayoutElement(Bbox(50, 100, 150, 200, page_bbox.x1, page_bbox.y1 ))\n return [a,b,c,d]\n\n@pytest.fixture\ndef layout_graph(page_bbox, layout_elements):\n return LayoutGraph(page_bbox, layout_elements)\n\n\nclass TestLayoutGraph():\n \n def test_layout_graph_creation(self, page_bbox, layout_elements):\n lg = LayoutGraph(page_bbox, layout_elements)\n\n assert len(lg.nodes[0].down) == 2\n assert lg.nodes[1].element == layout_elements[0]\n \n def test_layout_graph_get_node_int(self, layout_graph):\n assert layout_graph.get_node(0) == layout_graph.nodes[0]\n \n def test_layout_graph_get_node_tuple(self, layout_graph):\n assert layout_graph.get_node((0, 11.1)) == layout_graph.nodes[0]\n \n @pytest.mark.parametrize('node_results', [\n [0, [(1,5),(2,5)], [], [], []],\n [1, [(3, 60)], [(0, 5)], [], [(2, 10)]],\n [2, [(4, 80)], [(0,5)], [(1, 10)], []],\n [3, [(4, 10)], [(1, 60)], [], []],\n [4, [], [(3, 10), (2,80)], [], []]\n ])\n def test_layout_graph_result_root(self, layout_graph, node_results):\n i = node_results[0]\n assert layout_graph.nodes[i].down == node_results[1]\n assert layout_graph.nodes[i].up == node_results[2]\n assert layout_graph.nodes[i].left == node_results[3]\n assert layout_graph.nodes[i].right == node_results[4]\n \n @pytest.mark.parametrize('node_results', [\n [0, []],\n [1, [0]], [2, [0, 1]],\n [3, [1,0]], [4, [3, 2, 1, 0]]\n ])\n def test_node_has_ancestor(self, layout_graph, node_results):\n ancestors = []\n for i in range(5):\n if i == node_results[0]:\n continue\n if layout_graph.node_has_ancestor(node_results[0], i):\n ancestors.append(i)\n assert set(ancestors) == set(node_results[1])\n \n def test_str(self, layout_graph):\n lg_str =\\\n\"\"\"==============================\n------------------------------\n\nU: \nL: \nR: \nD: (,5),\n (,5)\n------------------------------\n\nU: (,5)\nL: \nR: (,10)\nD: (,60)\n------------------------------\n\nU: (,5)\nL: (,10)\nR: \nD: (,80)\n------------------------------\n\nU: (,60)\nL: \nR: \nD: (,10)\n------------------------------\n\nU: (,10),\n (,80)\nL: \nR: \nD: \n==============================\"\"\"\n\n #Can't compare direct values as IDs change...\n assert len(str(layout_graph)) == len(lg_str)\n ","repo_name":"jennis0/burdoc","sub_path":"tests/unit/utils/test_layout_graph.py","file_name":"test_layout_graph.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"70040077184","text":"\"\"\"Contains membership function group (MFGroup) definition.\n\nMFGroups serve as named collections of membership functions and has a method\nfor plotting.\n\nAn example of a group could be 'temperature', comprised of membership functions\nsuch as 'cold', 'warm', and 'hot'.\n\"\"\"\n\nfrom typing import List, Tuple, Optional\n\nimport numpy as np\nfrom matplotlib.axis import Axis\nimport matplotlib.pyplot as plt\n\nfrom hotfis import FuzzyFunc\n\n\nclass FuzzyGroup:\n \"\"\"A collection of membership functions corresponding to fuzzy sets.\n\n Args:\n name: The name of the group.\n xmin: Smallest group domain value used in Mamdani evaluation and visualization.\n xmax: Largest group domain value used in Mamdani evaluation and visualization.\n fns: A list of MembFuncs stored in the group.\n\n Attributes:\n fns (Dict[MembFunc]): Dictionary of MembFuncs stored in the 
group.\n Their names are keys and the objects themselves are values.\n name (str): The name of the group.\n domain (Tuple[float, float]): Domain for Mamdani evaluation and visualization.\n \"\"\"\n # -----------\n # Constructor\n # -----------\n\n def __init__(self, name: str, xmin: float, xmax: float, fns: List[FuzzyFunc]):\n # Save group name and functions\n self.name = name\n self.fns = {fn.name: fn for fn in fns}\n\n # Get domain range used in Mamdani evaluation\n self.domain = (xmin, xmax)\n\n # -------\n # Methods\n # -------\n\n def __getitem__(self, fn_name) -> FuzzyFunc:\n \"\"\"Supports subscripting with membership function name.\n\n Args:\n fn_name: Name of the function to retrieve.\n \"\"\"\n return self.fns[fn_name]\n\n def __setitem__(self, fn_name: str, fn: FuzzyFunc):\n \"\"\"Supports function assignment with subscripting.\n\n Will overwrite a function if it already exists.\n\n Args:\n fn_name: The function name.\n fn: Function to save in group.\n \"\"\"\n self.fns[fn_name] = fn\n\n def __iter__(self):\n \"\"\"Can iterate through each membership function.\n \"\"\"\n return iter(self.fns.values())\n\n def keys(self):\n \"\"\"Returns the names of each contained membership function.\n \"\"\"\n return self.fns.keys()\n\n def items(self):\n \"\"\"Returns each function name and object.\n \"\"\"\n return self.fns.items()\n\n def values(self):\n \"\"\"Returns each membership function.\n \"\"\"\n return self.fns.values()\n\n def plot(self, start: Optional[float] = None, stop: Optional[float] = None,\n num_points: int = 500, stagger_labels: bool = False,\n line_color: str = \"black\", fill_alpha=0.1,\n **plt_kwargs) -> Tuple[Axis, Axis]:\n \"\"\"Plots every function in the group in a new figure.\n\n Args:\n start: Specified start of plot domain.\n Defaults to group domain start if None is passed.\n stop: Specified end of plot domain.\n Defaults to group domain end if None is passed.\n num_points: Number of points to plot for each function.\n stagger_labels: Whether to stagger function label names on top.\n line_color: matplotlib.pyplot color of the line representing the function.\n fill_alpha: Alpha of function color. 
Set to 0.0 for no fill.\n **plt_kwargs: matplotlib.pyplot plotting options.\n\n Returns:\n The main axis being plotted and a second twin axis on top\n where function names are written as xtick labels.\n \"\"\"\n # Create figure twin x axes (top one for function names)\n ax1 = plt.gca()#plt.gcf().add_subplot(111)\n ax2 = plt.gca().twiny()\n\n # Prepare to save xticks\n xticks = dict()\n\n all_tsk = all([fn.fn_type == \"tsk\" for fn in self])\n\n # Create domain based on given parameters or group's domain\n if start is not None and stop is not None:\n domain = np.linspace(start, stop, num_points)\n else:\n domain = np.linspace(self.domain[0], self.domain[1], num_points)\n\n # For each function, plot and update x-ticks\n for fn in self:\n # Plot function if not TSK\n if fn.fn_type != \"tsk\":\n codomain = fn(domain)\n ax1.plot(domain, codomain, color=line_color, **plt_kwargs)\n ax1.fill_between(domain, codomain, alpha=fill_alpha)\n\n # Update function x-tick labels\n xtick_val = fn.center\n if xtick_val not in xticks.values():\n xticks[fn.name] = xtick_val\n else:\n key = [k for k, v in xticks.items() if v == xtick_val][0]\n xticks[f\"{key}/{fn.name}\"] = xtick_val\n del xticks[key]\n\n # TSK functions\n else:\n ax1.axvline(fn.center, color=line_color, ymax=0.95, **plt_kwargs)\n xticks[fn.name] = fn.center\n\n # Finalize x and y limits and x-ticks\n ax1.set_ylim(0.0, 1.05)\n if not all_tsk:\n ax1.margins(0.0, x=True)\n ax2.set_xlim(ax1.get_xlim())\n ax2.set_xticks(list(xticks.values()))\n ax2.set_xticklabels(list(xticks.keys()), fontsize=8)\n ax2.set_ylim(0.0, 1.05)\n\n # Stagger function name labels if requested\n if stagger_labels:\n for tick in ax2.xaxis.get_major_ticks()[1::2]:\n tick.set_pad(15)\n\n # Decorate\n plt.title(self.name, pad=16)\n ax1.grid(visible=True, axis=\"y\", alpha=0.5, ls=\"--\")\n\n plt.sca(ax1)\n\n return ax1, ax2","repo_name":"ericzander/hotfis","sub_path":"hotfis/membership/fuzzygroup.py","file_name":"fuzzygroup.py","file_ext":"py","file_size_in_byte":5686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"24"} +{"seq_id":"33081870689","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSynopsis:\nParse the title and release date of movies, series... 
\n\nSteps:\n- Ask user for the number of pages to parse\n- Use a function to parse the number of websites\nImput:\n- if we inspect the elements of the web we can see that class clearfix holds all info we want\n- user select number of pages\nOutput:\n- Print the releases on each page\n\nDisclamer: The only intent of this script is to illustrate an example\nof how to parse a website and how to work with regular expressions.\nThis is for Use at your own risk!\n\n\"\"\"\nfrom ParseWeb import ParseWeb\n\ndef main():\n numPages = input('Enter number of pages:')\n num = 1\n while num <= int(numPages):\n ParseWeb(num)\n num += 1\n \nmain()\nif __name__ == '__name__':\n main()\n\n\n\n\n","repo_name":"jorgebarron/parseweb","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"3961915825","text":"import numpy as np\n\ndef extract_data(dir):\n len_list = len_file(dir)\n data = [[0,0] for k in range(len_list)]\n file = open(dir)\n for i in range(len_list):\n coord = file.readline().split('\\t')\n for j in range(2):\n data[i][j]= int(coord[j])\n return data\n\n\ndef len_file(dir):\n file=open(dir)\n rl=file.readline()\n len_f=0\n while rl!='':\n len_f+=1\n rl=file.readline()\n return len_f\n\ndef save_list_point(list, dir, name, type):\n file = open(dir + name + type,'w')\n for point in list:\n file.write(str(point[0]))\n file.write('\\t')\n file.write(str(point[1]))\n file.write('\\n')\n return True\n\n\n","repo_name":"millischerm/Puzzle-","sub_path":"CODE/File_Function.py","file_name":"File_Function.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9758123468","text":"import torch\nimport numpy as np\nfrom eval import metrics\nimport gc\nimport copy\n\ndef evaluate(model, data, device, mode):\n \"\"\" evaluate model on recommending items to groups \"\"\"\n eval_loss = 0.0\n ndcg10_list, hits10_list = [], []\n\n\n model.eval()\n with torch.no_grad():\n data = copy.copy(data).to(device) \n heldout_data = data['group'].val_y \n if mode == 'test':\n heldout_data = data['group'].test_y\n\n eval_group_idx = torch.unique(heldout_data.nonzero(as_tuple=True)[0])\n\n out = model(data.x_dict, data.edge_index_dict)\n result = out['group'].softmax(1)\n mask = data['group'].y.nonzero()\n result[mask[:,0], mask[:,1]] = -np.inf\n\n result = result[eval_group_idx]\n heldout_data = heldout_data[eval_group_idx]\n\n\n \n hits10 = metrics.hits_at_k_batch_torch(result, heldout_data, 10)\n ndcg10 = metrics.ndcg_binary_at_k_batch_torch(result, heldout_data, 10, device=device)\n\n ndcg10_list.append(ndcg10)\n hits10_list.append(hits10)\n\n del data\n gc.collect()\n\n ndcg10_list = torch.cat(ndcg10_list)\n hits10_list = torch.cat(hits10_list)\n return eval_loss, torch.mean(ndcg10_list), torch.sum(hits10_list)/eval_group_idx.shape[0]\n\n","repo_name":"lekwanghee/GraphGR","sub_path":"eval/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"21589141504","text":"# Write a program that, given some value, tests that value for its type. 
Here's what you should do for each type:\n# Integer\n# If the integer is greater than or equal to 100, print \"That's a big number!\" If the integer is less than 100, print \"That's a small number\"\nsI = 45\nbI = 200\nif sI <= 100:\n print(\"That's a small number!\")\nelse:\n print(\"That's a big number!\")\nif bI >= 100:\n print(\"That's a big number!\")\nelse:\n print(\"That's a small number!\")\n# String\n# If the string is greater than or equal to 50 characters print \"Long sentence.\" If the string is shorter than 50 characters print \"Short sentence.\"\nsS = \"Rubber baby buggy bumpers\"\nbS = \"Experience is simply the name we give our mistakes\"\nif len(sS) <= 50:\n print(\"Short sentence.\")\nelse:\n print(\"Long sentence.\")\nif len(bS) >= 50:\n print(\"Long sentence.\")\nelse:\n print(\"Short sentence.\")\n# List\n# If the length of the list is greater than or equal to 10 print \"Big list!\" If the list has fewer than 10 values print \"Short list.\"\naL = [1, 7, 4, 21]\nbL = [3, 5, 7, 34, 3, 2, 113, 65, 8, 89]\nif len(aL) <= 10:\n print(\"Short list.\")\nelse:\n print(\"Big list!\")\nif len(bL) >= 10:\n print(\"Big list!\")\nelse:\n print(\"Small list.\")\n","repo_name":"justnclrk/python","sub_path":"python_fundamentals/Filter by Type/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"3897092113","text":"import urllib.request\nimport re\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom collections import Counter\nfrom urllib.parse import urljoin\n# Website that needs to be scrapped\ndef get_text(url):\n # Crawl the site to get html \n # What if the link doesnot exist?\n try:\n site = urllib.request.urlopen(url).read().decode('utf-8')\n except urllib.error.URLError as e:\n return ''\n else:\n html = BeautifulSoup(site, \"html.parser\") # Get the html from the site\n # Remove all script and style tags\n for script in html([\"script\", \"style\"]):\n script.extract() # Remove these two elements from the BS4 object\n # Get the plain text from html\n text = html.get_text()\n lines = (line.strip() for line in text.splitlines()) # break into lines\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \")) # break multi-headlines into a line each\n def chunk_space(chunk):\n chunk_out = chunk + ' ' # Need to fix spacing issue\n return chunk_out \n text = ''.join(chunk_space(chunk) for chunk in chunks if chunk).encode('utf-8') # Get rid of all blank lines and ends of line\n # Now clean out all of the unicode junk (this line works great!!!)\n text = text.decode('unicode_escape').encode('ascii', 'ignore') # Need this as some websites aren't formatted\n text = text.decode('utf-8') # Bytecode like text to string probably wierd with python 3\n text = re.sub(\"[^a-zA-Z.+3]\",\" \", text) # Now get rid of any terms that aren't words\n text = text.lower().split() # Go to lower case and split them apart \n return text\ndef get_word_counts(text):\n only = {}\n for i in text: \n if i not in only: #variable w parses each word to check if it is repeating and counting\n only[i] = 1\n else:\n only[i] += 1\n# with open(str(file )+ \".txt\", \"w\") as text_file:\n# for w1 in sorted(only.keys()):\n# text_file.write('\\n' + str(w1) + \" \" + str(only[w1]))\n# text_file.write('\\n\\n\\n')\n \n return only \nurls =[] \nmainurl = 'http://www.memphis.edu/'\nhtmlfile = urllib.request.urlopen(mainurl).read()\n\n# Query webpage to get all links only\nfor link in 
BeautifulSoup(htmlfile, \"html.parser\", parse_only=SoupStrainer('a')):\n if 'href' in getattr(link, 'attrs', {}):\n href = link['href']\n # Remove any whitespace\n href = href.strip()\n \n # Cleaning hashed urls and url params\n if '#' in href:\n href = href.split('#')[0]\n if '?' in href:\n href = href.split('?')[0]\n \n # Ignore PPT file, main url, email links\n if (('.ppt' in href) or('.pdf' in href) or (href == mainurl) or ('mailto:' in href)):\n continue\n \n # Uh! Check for relative URL if so fix it by appending the current url\n if re.match('^[a-zA-Z]{2,}:', href) == None:\n abs_href = urljoin(mainurl, href)\n #print(\"Converting relative URL to absolute: %s -> %s\" % (href, abs_href))\n href = abs_href\n \n if (href not in urls):\n # Finally! push it to urls list\n urls.append(href)\n \nall_words = {}\noccurances ={}\nfor i in range(len(urls)):\n print(urls[i]);\n previous = Counter(all_words) \n next = Counter(get_word_counts(get_text(urls[i])))\n if (i==0):\n for word in next.keys():\n occurances[word] = 1\n else:\n for word in next.keys():\n if word in occurances:\n occurances[word] += 1 \n \n all_words = previous + next\nall_words = Counter(all_words)\nprint(len(occurances))\nwith open(\"occurrances.txt\", \"w\") as text_file:\n for word in sorted(occurances.keys()):\n text_file.write('\\n' + str(word) + \" \" + str(occurances[word]))\n \nwith open(\"test.txt\", \"w\") as text_file:\n for w1 in sorted(all_words.keys()):\n text_file.write('\\n' + str(w1) + \" \" + str(all_words[w1]))","repo_name":"vsreelasya/Search-engine","sub_path":"Assignment3/Vallabhaneni_SreeLasya_Assignment03_part2.py","file_name":"Vallabhaneni_SreeLasya_Assignment03_part2.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"15283646906","text":"import os\nimport tornado.ioloop\nimport tornado.web\n\n\nclass ImgHandler(tornado.web.RequestHandler):\n def get(self, package):\n self.set_header(\"Content-Type\", \"image/svg+xml\")\n import deps_plot\n svg_content = '\\n'.join(deps_plot.svg_content(package))\n self.write(svg_content)\n\n\napplication = tornado.web.Application(\n [\n (r\"/img/(.*)\", ImgHandler),\n ],\n debug=True\n)\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n application.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n","repo_name":"pelson/conda-forge-deps","sub_path":"serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"27232812699","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 24 15:13:46 2021\r\n\r\n@author: Marius\r\n\"\"\"\r\nfrom print_func import TimePrint\r\nimport requests\r\nimport json\r\n\r\nclass Signal:\r\n def __init__(self, name, key, token, user=None):\r\n self.name = str(name)\r\n self.key = str(key)\r\n self.token = str(token)\r\n self.user = str(user)\r\n self.value = list(str(json.loads(requests.get('https://circusofthings.com/ReadValue',\r\n params = {'Key':self.key,\r\n 'Token':self.token}).content)['Value']))\r\n self.last_value = self.value\r\n TimePrint(f\"Signal instance {self.name} created with key {self.key}.\")\r\n \r\n \r\n def refresh(self):\r\n old_val = self.value\r\n # Leser av signalet og oppdaterer objektets 'value'.\r\n response = requests.get('https://circusofthings.com/ReadValue',\r\n params = {'Key':self.key,\r\n 'Token':self.token})\r\n response = 
json.loads(response.content)\r\n self.value = list(str(response['Value']))\r\n \r\n if self.value != old_val:\r\n self.last_value = old_val\r\n \r\n \r\n def read(self):\r\n # Leser av signalet og returnerer det som en liste med siffere.\r\n response = requests.get('https://circusofthings.com/ReadValue',\r\n params = {'Key':self.key,\r\n 'Token':self.token})\r\n response = json.loads(response.content)\r\n return list(str(response['Value']))\r\n \r\n def read_int(self):\r\n # Leser av signalet og returnerer det som en int.\r\n response = requests.get('https://circusofthings.com/ReadValue',\r\n params = {'Key':self.key,\r\n 'Token':self.token})\r\n response = json.loads(response.content)\r\n return response['Value']\r\n \r\n def write(self, val):\r\n # Skriver en verdi til signalet\r\n data = {'Key': self.key, 'Token': self.token, 'Value': val}\r\n requests.put('https://circusofthings.com/WriteValue',\r\n data = json.dumps(data),\r\n headers = {'Content-Type': 'application/json'})\r\n self.refresh()\r\n TimePrint(f\"Value '{val}' written to {self.name}.\")\r\n\r\n","repo_name":"LarsiParsii/Smarthus.gruppe9","sub_path":"Python/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"40505299511","text":"from django import views\nfrom django.urls import path\nfrom App import views\napp_name='App'\n\nurlpatterns = [\n path('index/',views.index,name='index'),\n path('adventure/',views.packadv,name='packadv'),\n path('holyday/',views.packholy,name='packholy'),\n path('devotional/',views.packdev,name='packdev'),\n path('honeymoon/',views.packhoney,name='packhoney'),\n # path('bookform/',views.bookform,name='bookform'),\n path('bookpack/',views.bookpack,name='bookpack'),\n path('confirm/',views.confirm,name='confirm'),\n path('bookingconfirmed/',views.booksave,name='booksave'),\n path('contact/',views.msg,name='msg'),\n path('rating/',views.rate,name='rate'),\n\n]\n\n","repo_name":"JithuSebastian24/Tour_Mangement_System","sub_path":"App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"7155279382","text":"import tensorflow as tf\n\nfrom . import discriminator\nfrom . import eval_metrics\nfrom . import generator\nfrom . import image_utils\nfrom . import predict\nfrom . import train\nfrom . 
import train_and_eval\nfrom .print_object import print_obj\n\n\ndef pgan_model(features, labels, mode, params):\n \"\"\"Progressively Growing GAN custom Estimator model function.\n\n Args:\n features: dict, keys are feature names and values are feature tensors.\n labels: tensor, label data.\n mode: tf.estimator.ModeKeys with values of either TRAIN, EVAL, or\n PREDICT.\n params: dict, user passed parameters.\n\n Returns:\n Instance of `tf.estimator.EstimatorSpec` class.\n \"\"\"\n func_name = \"pgan_model\"\n print_obj(\"\\n\" + func_name, \"features\", features)\n print_obj(func_name, \"labels\", labels)\n print_obj(func_name, \"mode\", mode)\n print_obj(func_name, \"params\", params)\n\n # Loss function, training/eval ops, etc.\n predictions_dict = None\n loss = None\n train_op = None\n eval_metric_ops = None\n export_outputs = None\n\n # Instantiate generator.\n pgan_generator = generator.Generator(\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(\n scale_l1=params[\"generator_l1_regularization_scale\"],\n scale_l2=params[\"generator_l2_regularization_scale\"]\n ),\n bias_regularizer=None,\n params=params,\n name=\"generator\"\n )\n\n # Instantiate discriminator.\n pgan_discriminator = discriminator.Discriminator(\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(\n scale_l1=params[\"discriminator_l1_regularization_scale\"],\n scale_l2=params[\"discriminator_l2_regularization_scale\"]\n ),\n bias_regularizer=None,\n params=params,\n name=\"discriminator\"\n )\n\n # Create alpha variable to use for weighted sum for smooth fade-in.\n alpha_var = tf.get_variable(\n name=\"alpha_var\",\n dtype=tf.float32,\n initializer=tf.zeros(shape=[], dtype=tf.float32),\n trainable=False\n )\n print_obj(func_name, \"alpha_var\", alpha_var)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n # Get predictions and export outputs.\n (predictions_dict,\n export_outputs) = predict.get_predictions_and_export_outputs(\n features=features,\n generator=pgan_generator,\n params=params\n )\n else:\n # Get logits and losses from networks for train and eval modes.\n (real_logits,\n fake_logits,\n generator_total_loss,\n discriminator_total_loss) = train_and_eval.get_logits_and_losses(\n features=features,\n generator=pgan_generator,\n discriminator=pgan_discriminator,\n alpha_var=alpha_var,\n params=params\n )\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Get loss and train op for EstimatorSpec.\n loss, train_op = train.get_loss_and_train_op(\n generator_total_loss=generator_total_loss,\n discriminator_total_loss=discriminator_total_loss,\n alpha_var=alpha_var,\n params=params\n )\n else:\n # Get eval metric ops for EstimatorSpec.\n loss, eval_metric_ops = eval_metrics.get_loss_and_eval_metric_ops(\n discriminator_total_loss=discriminator_total_loss,\n real_logits=real_logits,\n fake_logits=fake_logits\n )\n\n # Return EstimatorSpec\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_dict,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs\n )\n","repo_name":"ryangillard/artificial_intelligence","sub_path":"machine_learning/gan/pgan/tf_pgan/pgan_module/trainer/pgan.py","file_name":"pgan.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"11005352644","text":"from flask import Blueprint, request, g\nfrom . 
import auth\nfrom .data.database import Character\nfrom .webutil import ViewReturn\n\nbp = Blueprint(\"search\", __name__)\n\n\n@bp.route(\"/api/search\")\n@auth.login_required\n@auth.admin_only\ndef search() -> ViewReturn:\n search_like = \"%{}%\".format(request.args[\"query\"])\n\n results = []\n for character in (\n g.db.query(Character)\n .filter(Character.name.like(search_like))\n .order_by(Character.name.asc())\n ):\n results.append(\n {\n \"id\": character.id,\n \"name\": character.name,\n }\n )\n\n return {\n \"query\": request.args[\"query\"],\n \"results\": results,\n }\n","repo_name":"testkil/tdf-waitlist","sub_path":"api/waitlist/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"24"} +{"seq_id":"42230466540","text":"import tweepy\nimport csv\nimport re\nimport string\nimport numpy as np\nfrom PreProcessTweets import PreProcessTweets\n\nclass LivePredictionStream(tweepy.StreamListener):\n\n def __init__(self, \n file_name, \n predictor,\n num_iter=10, \n verbose=0,\n tagignore=[]):\n \n super().__init__()\n self.file_name = file_name\n self.predictor = predictor\n self.num_ter = num_iter\n self.counter = 1\n self.verbose = verbose\n self.tagignore = tagignore\n\n def on_status(self, status):\n \n if not hasattr(status, \"retweeted_status\"):\n \n with open(self.file_name, \"a\", encoding='utf-8', newline='') as f:\n \n text = status.extended_tweet[\"full_text\"] if hasattr(status, \"extended_tweet\") else status.text\n text = text.replace(\",\", \"\")\n text = text.replace(\";\", \"\")\n time = status.created_at\n user = status.user.screen_name\n \n # Preprocess\n text = re.sub(r'\\s?http\\S+', \"\", text)\n text = [word for word in text.split() if word not in self.tagignore]\n text = \" \".join(text)\n show_text = text\n text = text.translate(str.maketrans('', '', string.punctuation))\n\n \n if self.verbose > 0:\n p = self.predictor.model.predict_proba([text])\n label = np.argmax(p)\n label_text = (\"denial\" if label == 0 else \"normal\")\n print(f\"({self.counter}) {label_text} ({p[0][label]:.2f}): {show_text}\")\n \n location = status.user.location\n coordinates = None\n if status.place is not None:\n coordinates = status.place.bounding_box.coordinates\n\n tags = PreProcessTweets.get_tags(text, True)\n \n writer = csv.writer(f, delimiter=\";\")\n writer.writerow([text, tags, user, time, location, coordinates])\n \n # Increment counter\n self.counter += 1\n \n # End streaming by raising exception\n if self.counter > self.num_ter:\n raise Exception(\"Max iterations reached!\")\n \n def on_error(self, status_code):\n print(\"Error: \", status_code)\n return False\n ","repo_name":"savmasse/bds-project","sub_path":"python files/LivePredictionStream.py","file_name":"LivePredictionStream.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"17067213845","text":"from __future__ import print_function\nimport cmd\nimport sys\nimport logging\nimport pprint\nimport os\nimport time\nfrom optparse import OptionParser\nfrom contextlib import closing\nfrom pyp2p import __version__\nfrom pyp2p.core.exceptions import PyP2pException\nfrom pyp2p.conf.jsonreader import JSONConfReader\n\nimport pyp2p.register as reg\nimport pyp2p.unregister as unreg\nfrom pyp2p.session import P2pSession\n\n\ntry:\n from colorlog import basicConfig\n FORMAT = 
'%(log_color)s%(asctime)s:%(name)s:%(levelname)s: %(message)s'\nexcept ImportError:\n from logging import basicConfig\n FORMAT = '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n\n\ndef handle_exception(func):\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except PyP2pException as error:\n print(\"P2P error: %s\" % error.msg)\n except Exception as error:\n print(\"Unexpected error: %s\" % error)\n inner.__doc__ = func.__doc__\n return inner\n\n\nclass expected_args(object):\n def __init__(self, arg_count):\n self.arg_count = arg_count\n\n def __call__(self, func):\n \"\"\"\n If there are decorator arguments, __call__() is only called\n once, as part of the decoration process! You can only give\n it a single argument, which is the function object.\n \"\"\"\n def wrapped_f(*args):\n if len(args[1].split()) == self.arg_count:\n return func(*args)\n else:\n print(\"*** Syntax error: expected %s args\" % self.arg_count)\n print(func.__doc__)\n wrapped_f.__doc__ = func.__doc__\n return wrapped_f\n\n\nclass PyP2pShell(cmd.Cmd):\n \"\"\"\n PyP2pShell is the Cmd class of the pyp2p lib\n \"\"\"\n\n intro = \"####################################\\n\" \\\n \"# Welcome to the PyP2pShell shell! #\\n\" \\\n \"# (pyp2p : %s) #\\n\" \\\n \"# Type help or ? to list commands. #\\n\" \\\n \"####################################\\n\" % __version__\n prompt = '(pyp2p) '\n\n def __init__(self, conf):\n cmd.Cmd.__init__(self)\n self.conf = conf\n self.logger = logging.getLogger(\"pyp2p.shell\")\n self.pp = pprint.PrettyPrinter(indent=4)\n self.session = None\n\n def _get_server_and_port_from_conf(self):\n \"\"\"Return a tuple with server and port\"\"\"\n\n current = self.conf[\"current\"]\n return (self.conf[\"domains\"][current][\"server\"],\n self.conf[\"domains\"][current][\"port\"])\n\n @expected_args(0)\n @handle_exception\n def do_clear(self, arg):\n \"\"\"\n Clear screen\n \"\"\"\n arg = arg\n os.system('clear')\n\n @expected_args(2)\n @handle_exception\n def do_register(self, arg):\n \"\"\"\n Register on xmpp server (create an xmpp account)\n\n args: JID password\n \"\"\"\n arg = arg.split()\n (server, port) = self._get_server_and_port_from_conf()\n reg.Register(server_address=server, port=port)\\\n .register(arg[0], arg[1])\n\n @expected_args(2)\n @handle_exception\n def do_unregister(self, arg):\n \"\"\"\n Unregister from xmpp server\n\n args: JID password\n \"\"\"\n arg = arg.split()\n (server, port) = self._get_server_and_port_from_conf()\n unreg.Unregister(server_address=server, port=port)\\\n .unregister(arg[0], arg[1])\n\n @expected_args(2)\n @handle_exception\n def do_start_session(self, arg):\n \"\"\"\n Start an xmpp session to server defined in conf with JID and password\n passed in argument\n\n args: JID password\n \"\"\"\n if self.session is not None:\n print(\"Already in a session. 
End session first.\")\n else:\n arg = arg.split()\n (server, port) = self._get_server_and_port_from_conf()\n self.session = P2pSession(server_address=server,\n port=port,\n jid=arg[0],\n password=arg[1])\n PyP2pShell.prompt = '(pyp2p) %s>' % arg[0]\n\n @expected_args(0)\n @handle_exception\n def do_end_session(self, arg):\n \"\"\"\n End an xmpp session\n \"\"\"\n try:\n self.session.session_disconnect()\n except AttributeError:\n print(\"No session active\")\n\n time.sleep(2) # let the stream close nicely\n self.session = None\n PyP2pShell.prompt = '(pyp2p) '\n\n @expected_args(0)\n @handle_exception\n def do_show_roster(self, arg):\n \"\"\"\n Display user roster\n Require an active session\n \"\"\"\n try:\n roster = self.session.get_session_roster()\n jid = self.session.get_session_jid()\n except AttributeError:\n print(\"No session active\")\n\n print('Roster for %s' % jid)\n groups = roster.groups()\n for group in groups:\n print('\\n%s' % group)\n print('-' * 72)\n for jid in groups[group]:\n sub = roster[jid]['subscription']\n name = roster[jid]['name']\n if roster[jid]['name']:\n print(' %s (%s) [%s]' % (name, jid, sub))\n else:\n print(' %s [%s]' % (jid, sub))\n\n @expected_args(1)\n @handle_exception\n def do_subscribe(self, arg):\n \"\"\"\n Subscribe to a xmpp user presence\n Require an active session\n\n arg: target_JID\n \"\"\"\n arg = arg.split()\n try:\n self.session.subscribe(targetjid=arg[0])\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(1)\n @handle_exception\n def do_unsubscribe(self, arg):\n \"\"\"\n Unsubscribe from a xmpp user presence\n Require an active session\n\n arg: target_JID\n \"\"\"\n arg = arg.split()\n try:\n self.session.unsubscribe(targetjid=arg[0])\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(1)\n @handle_exception\n def do_remove(self, arg):\n \"\"\"\n Remove completely a xmpp account from roster\n Require an active session\n\n arg: target_JID\n \"\"\"\n arg = arg.split()\n try:\n self.session.remove(targetjid=arg[0])\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(2)\n @handle_exception\n def do_send(self, arg):\n \"\"\"\n Send a message to a xmpp user\n Require an active session\n\n arg: JID msg\n \"\"\"\n arg = arg.split()\n try:\n self.session.session_send(recipient=arg[0], msg=' '.join(arg[1:]))\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(1)\n @handle_exception\n def do_get_privacy(self, arg):\n \"\"\"\n Display privacy list\n Require an active session\n\n arg: listname\n\n \"\"\"\n arg = arg.split()\n try:\n privacy_list = self.session.get_privacy_list(list_name=arg[0])\n print(privacy_list)\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(0)\n @handle_exception\n def do_set_privacy(self, arg):\n \"\"\"\n Set privacy list\n Require an active session\n\n \"\"\"\n arg = arg\n try:\n self.session.set_privacy()\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(0)\n @handle_exception\n def do_get_lists(self, arg):\n \"\"\"\n Display privacy lists\n Require an active session\n\n \"\"\"\n arg = arg\n try:\n privacy_lists = self.session.get_lists()\n print(privacy_lists)\n except AttributeError:\n print(\"No session active\")\n\n @expected_args(0)\n @handle_exception\n def do_authorize_sub(self, arg):\n \"\"\"\n Set library to automatically authorize subscriptions\n Require an active session\n\n \"\"\"\n arg = arg\n try:\n self.session.authorize_subscriptions()\n except AttributeError:\n 
print(\"No session active\")\n\n @expected_args(0)\n @handle_exception\n def do_reject_sub(self, arg):\n \"\"\"\n Set library to automatically reject subscriptions\n Require an active session\n\n \"\"\"\n arg = arg\n try:\n self.session.reject_subscriptions()\n except AttributeError:\n print(\"No session active\")\n\n\ndef get_conf_filename(options):\n \"\"\"\n Return a configuration filename according to given options\n \"\"\"\n if options.conf_filename is None:\n current_dir = os.path.dirname(os.path.abspath(__file__))\n conf_filename = os.path.join(current_dir, \"conf.json\")\n else:\n conf_filename = options.conf_filename\n\n if not os.path.exists(conf_filename):\n print(\"Error: %s does not exist\" % options.conf_filename)\n sys.exit(1)\n return conf_filename\n\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--debug\", dest=\"debug_level\",\n help=\"set log level to LEVEL\", metavar=\"LEVEL\")\n parser.add_option(\"-c\", \"--conf\", dest=\"conf_filename\",\n help=\"configuration FILENAME\", metavar=\"FILENAME\")\n parser.add_option(\"-k\", \"--check\", dest=\"check\", action=\"store_true\",\n default=False,\n help=\"only checks that shell app works correctly\")\n (options, args) = parser.parse_args()\n args = args\n if options.debug_level is not None:\n level = int(options.debug_level)\n else:\n level = logging.INFO\n\n basicConfig(level=level, format=FORMAT)\n\n conf = JSONConfReader(conf_filename=get_conf_filename(options)).conf\n\n exit_no = 0\n try:\n shell = PyP2pShell(conf=conf)\n if options.check:\n sys.exit(0)\n shell.cmdloop()\n except PyP2pException as error:\n print(\"Error: %s\" % error)\n exit_no = 2\n except KeyboardInterrupt:\n print(\"Bye!\")\n exit_no = 0\n except Exception as error:\n print(\"Uncaught error: %s\" % error)\n exit_no = 1\n finally:\n try:\n shell.session.session_disconnect()\n except AttributeError: # session already ended\n pass\n sys.exit(exit_no)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fribes/pyp2p","sub_path":"pyp2p/shell/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":10472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"73468045823","text":"\"\"\" This Handler is responsible for recieving a\r\n user id in the wrapper of the API call and\r\n returning a json formatted documents with\r\n a users home screen info.\r\n the wrapper for news will be\r\n http://backendgroovebug.appspot.com/v1/home?user=\"\"\"\r\n\r\n\r\n\r\nimport os\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp.template import render\r\nfrom google.appengine.ext.webapp import util\r\nfrom django.utils import simplejson as json\r\nfrom google.appengine.api import memcache\r\nfrom operator import itemgetter, attrgetter\r\nimport DataModels as models\r\nimport urllib2, urllib, datetime, re, htmlentitydefs, logging\r\nlogging.getLogger().setLevel(logging.DEBUG)\r\n\r\n\r\n\r\n\"\"\" Section ending with \"---\" defines class that handles customized user home\r\n page requests\"\"\"\r\nclass HomeHandler(webapp.RequestHandler):\r\n def get(self, user):\r\n logging.info('START')\r\n currentUserId = self.request.get(\"user\")\r\n\r\n if currentUserId:\r\n currentUserId = currentUserId.encode('utf-8')\r\n if self.request.get('fbauth'):\r\n fbAuth = self.request.get('fbauth')\r\n libraryData = models.GetSavedUserData(currentUserId, fbAuth = fbAuth)\r\n else:\r\n libraryData = models.GetSavedUserData(currentUserId)\r\n 
logging.info('FINISH')\r\n self.response.headers['Content-Type'] = \"application/json\"\r\n self.response.out.write(json.dumps(libraryData, separators=(',',':')))\r\n else:\r\n finalPreJson = {}\r\n finalPreJson['status'] = {'error' : 'No UDID in url', 'version' : '2'}\r\n logging.info('FINISH')\r\n self.response.headers['Content-Type'] = \"application/json\"\r\n self.response.out.write(json.dumps(finalPreJson, separators=(',',':')))\r\n \r\n def post(self, user):\r\n# Use GetArtistNews method to obtain news data for given artist\r\n logging.info('START')\r\n loadedJson = json.load(self.request.body_file)\r\n currentUserId = self.request.get(\"user\")\r\n\r\n if currentUserId:\r\n currentUserId = currentUserId.encode('utf-8')\r\n if self.request.get('fbauth'):\r\n fbAuth = self.request.get('fbauth')\r\n verifiedLibraryData = models.GetVerifiedData(loadedJson, currentUserId, fbAuth = fbAuth)\r\n else:\r\n verifiedLibraryData = models.GetVerifiedData(loadedJson, currentUserId)\r\n logging.info('FINISH')\r\n self.response.headers['Content-Type'] = \"application/json\"\r\n self.response.out.write(json.dumps(verifiedLibraryData, separators=(',',':')))\r\n else:\r\n finalPreJson = {}\r\n finalPreJson['status'] = {'error' : 'No UDID in url', 'version' : '2'}\r\n logging.info('FINISH')\r\n self.response.headers['Content-Type'] = \"application/json\"\r\n self.response.out.write(json.dumps(finalPreJson, separators=(',',':')))\r\n\"\"\" --- \"\"\"\r\n\r\n\r\n\r\n\"\"\" Main function which handles url mappings\"\"\"\r\napplication = webapp.WSGIApplication([('/v2/home(.*)', HomeHandler),],\r\n debug=True)\r\ndef main():\r\n util.run_wsgi_app(application)\r\nif __name__ == '__main__':\r\n main()\r\n\"\"\" --- \"\"\"\r\n","repo_name":"bbcawodu/groovebug_backend","sub_path":"backendgroovebug 1.2.4/v2/HomeHandler.py","file_name":"HomeHandler.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"24734827504","text":"#\n#\n# 0=================================0\n# | Kernel Point Convolutions |\n# 0=================================0\n#\n#\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Segmentation model\n#\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Hugues THOMAS - 11/06/2018\n#\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Imports and global variables\n# \\**********************************/\n#\n\n\n# Basic libs\nfrom os import makedirs\nfrom os.path import exists\nimport time\nimport tensorflow as tf\nimport sys\n\n# Convolution functions\nfrom models.network_blocks_mprm import assemble_FCNN_CAM_blocks, segmentation_head, multi_segmentation_head\nfrom models.network_blocks_mprm import multi_classification_loss\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Model Class\n# \\*****************/\n#\n\n\nclass KernelPointFCNN:\n\n def __init__(self, flat_inputs, config):\n \"\"\"\n Initiate the model\n :param flat_inputs: List of input tensors (flatten)\n :param config: configuration class\n \"\"\"\n\n # Model parameters\n self.config = config\n\n # Path of the result folder\n if self.config.saving:\n if self.config.saving_path == None:\n self.saving_path = 
time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())\n else:\n self.saving_path = self.config.saving_path\n if not exists(self.saving_path):\n makedirs(self.saving_path)\n\n ########\n # Inputs\n ########\n\n # Sort flatten inputs in a dictionary\n with tf.variable_scope('inputs'):\n print(len(flat_inputs))\n self.inputs = dict()\n self.inputs['points'] = flat_inputs[:config.num_layers]\n self.inputs['neighbors'] = flat_inputs[config.num_layers:2 * config.num_layers]\n self.inputs['pools'] = flat_inputs[2 * config.num_layers:3 * config.num_layers]\n self.inputs['upsamples'] = flat_inputs[3 * config.num_layers:4 * config.num_layers]\n ind = 4 * config.num_layers\n self.inputs['features'] = flat_inputs[ind]\n ind += 1\n self.inputs['batch_weights'] = flat_inputs[ind]\n ind += 1\n self.inputs['in_batches'] = flat_inputs[ind]\n ind += 1\n self.inputs['out_batches'] = flat_inputs[ind]\n ind += 1\n self.inputs['point_labels'] = flat_inputs[ind]\n ind += 1\n self.labels = self.inputs['point_labels']\n self.inputs['last_batch_ind'] = flat_inputs[ind]\n ind += 1\n self.inputs['stacked_length_out'] = flat_inputs[ind]\n ind += 1\n\n\n if config.network_model in ['multi_segmentation', 'multi_cloud_segmentation']:\n self.inputs['super_labels'] = flat_inputs[ind]\n ind += 1\n\n self.inputs['augment_scales'] = flat_inputs[ind]\n ind += 1\n self.inputs['augment_rotations'] = flat_inputs[ind]\n\n if config.network_model in [\"cloud_segmentation\", 'multi_cloud_segmentation']:\n ind += 1\n self.inputs['point_inds'] = flat_inputs[ind]\n ind += 1\n self.inputs['cloud_inds'] = flat_inputs[ind]\n\n elif config.network_model in ['multi_segmentation', 'segmentation']:\n ind += 1\n self.inputs['object_inds'] = flat_inputs[ind]\n ind += 1\n self.inputs['cloud_labels'] = flat_inputs[ind]\n self.cloud_label = self.inputs['cloud_labels']\n ind += 1\n self.inputs['batch_ind'] = flat_inputs[ind]\n self.batch_ind = self.inputs['batch_ind']\n ind += 1\n self.inputs['cloud_labels_all'] = flat_inputs[ind]\n self.cloud_label_all = self.inputs['cloud_labels_all']\n ind += 1\n self.inputs['stacked_length'] = flat_inputs[ind]\n\n # Dropout placeholder\n self.dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n\n ########\n # Layers\n ########\n\n # Create layers\n with tf.variable_scope('KernelPointNetwork'):\n self.logits, self.class_logits, self.cam = assemble_FCNN_CAM_blocks(self.inputs,\n self.config,\n self.dropout_prob)\n self.logits = tf.multiply(self.logits, self.cloud_label_all)\n\n\n\n\n ########\n # Losses\n ########\n\n with tf.variable_scope('loss'):\n # Classification loss\n self.output_loss1 = multi_classification_loss(self.class_logits[0],self.inputs,config)\n self.output_loss2 = multi_classification_loss(self.class_logits[1],self.inputs,config)\n self.output_loss3 = multi_classification_loss(self.class_logits[2],self.inputs,config)\n self.output_loss4 = multi_classification_loss(self.class_logits[3],self.inputs,config)\n\n # Add regularization\n self.loss = self.regularization_losses() + self.output_loss1 \\\n + self.output_loss2\\\n + self.output_loss3 \\\n + self.output_loss4\n\n return\n\n def regularization_losses(self):\n\n #####################\n # Regularization loss\n #####################\n\n # Get L2 norm of all weights\n regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]\n self.regularization_loss = self.config.weights_decay * tf.add_n(regularization_losses)\n\n ##############################\n # Gaussian regularization loss\n 
##############################\n\n gaussian_losses = []\n for v in tf.global_variables():\n if 'kernel_extents' in v.name:\n\n # Layer index\n layer = int(v.name.split('/')[1].split('_')[-1])\n\n # Radius of convolution for this layer\n conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** (layer - 1))\n\n # Target extent\n target_extent = conv_radius / 1.5\n gaussian_losses += [tf.nn.l2_loss(v - target_extent)]\n\n if len(gaussian_losses) > 0:\n self.gaussian_loss = self.config.gaussian_decay * tf.add_n(gaussian_losses)\n else:\n self.gaussian_loss = tf.constant(0, dtype=tf.float32)\n\n #############################\n # Offsets regularization loss\n #############################\n\n offset_losses = []\n\n if self.config.offsets_loss == 'permissive':\n\n for op in tf.get_default_graph().get_operations():\n if op.name.endswith('deformed_KP'):\n\n # Get deformed positions\n deformed_positions = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** layer)\n\n # Normalized KP locations\n KP_locs = deformed_positions/conv_radius\n\n # Loss will be zeros inside radius and linear outside radius\n # Mean => loss independent from the number of input points\n radius_outside = tf.maximum(0.0, tf.norm(KP_locs, axis=2) - 1.0)\n offset_losses += [tf.reduce_mean(radius_outside)]\n\n\n elif self.config.offsets_loss == 'fitting':\n\n for op in tf.get_default_graph().get_operations():\n\n if op.name.endswith('deformed_d2'):\n\n # Get deformed distances\n deformed_d2 = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)\n\n # Get the distance to closest input point\n KP_min_d2 = tf.reduce_min(deformed_d2, axis=1)\n\n # Normalize KP locations to be independant from layers\n KP_min_d2 = KP_min_d2 / (KP_extent**2)\n\n # Loss will be the square distance to closest input point.\n # Mean => loss independent from the number of input points\n offset_losses += [tf.reduce_mean(KP_min_d2)]\n\n if op.name.endswith('deformed_KP'):\n\n # Get deformed positions\n deformed_KP = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)\n\n # Normalized KP locations\n KP_locs = deformed_KP/KP_extent\n\n # Point should not be close to each other\n for i in range(self.config.num_kernel_points):\n other_KP = tf.stop_gradient(tf.concat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], axis=1))\n distances = tf.sqrt(tf.reduce_sum(tf.square(other_KP - KP_locs[:, i:i+1, :]), axis=2))\n repulsive_losses = tf.reduce_sum(tf.square(tf.maximum(0.0, 1.5 - distances)), axis=1)\n offset_losses += [tf.reduce_mean(repulsive_losses)]\n\n elif self.config.offsets_loss != 'none':\n raise ValueError('Unknown offset loss')\n\n if len(offset_losses) > 0:\n self.offsets_loss = self.config.offsets_decay * tf.add_n(offset_losses)\n else:\n self.offsets_loss = tf.constant(0, dtype=tf.float32)\n\n return self.offsets_loss + self.gaussian_loss + self.regularization_loss\n\n def parameters_log(self):\n\n 
self.config.save(self.saving_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"plusmultiply/mprm","sub_path":"models/KPFCNN_mprm.py","file_name":"KPFCNN_mprm.py","file_ext":"py","file_size_in_byte":10574,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"24"} +{"seq_id":"27387619324","text":"from os import system\nimport json\n\n\ndef changePath(newpath):\n path = newpath\ndef form():\n choices = {}\n for x in attributes:\n choices[x] = input(f\"{x}: \")\n while choices['Type'] not in f:\n choices['Type'] = input(f\"Type: \")\n f[choices['Type']][choices[\"Title\"]] = choices\n Fsave()\n\ndef removeS(target):\n state = False\n for x in f:\n if target in f[x]:\n for y in f[x]:\n if target == y:\n a = int(input(\"1) Delete\\n2) Edit\\n3)Menu\\n>\"))\n if a == 1:\n del f[x][y]\n break\n if a == 2:\n att = input(\"Attribute you want to change\\n>\")\n if att in f[x][y]:\n f[x][y][att] = input(\"new attribute:\\n>\")\n else:\n print(\"Attribute not found\")\n removeS(target)\n if a == 3:\n break\n state = True\n if state == False:\n print(\"Not found\")\n Fsave()\n\ndef Fsave():\n with open(data_base,\"w\") as fW:\n json.dump(f,fW,indent=4)\n\ndef listAll():\n print(10*\"--\")\n print(f\"Listing Menu\")\n print(10*\"--\")\n\n a = int(input(\"1) All\\n2) Not seen\\n3) Seen\\n4) Category\\n5) Menu\\n>\"))\n system(\"cls\")\n parents =[x for x in f]\n if a == 4:\n print(\"\\n\".join(parents))\n listAll()\n if a == 1 or 2 or 3:\n for x in parents:\n for y in f[x]:\n for b in f[x][y]:\n if a == 1 :\n print(f\"{b:5} : {f[x][y][b]:20}\",end=\"\")\n if a == 2 :\n if f[x][y][\"Watched\"].lower() == \"no\":\n print(f\"{b:5} : {f[x][y][b]:20}\",end=\"\")\n if a == 3 :\n if f[x][y][\"Watched\"].lower() == \"yes\":\n print(f\"{b:5} : {f[x][y][b]:20}\",end=\"\")\n print()\n if a == 5:\n menu()\n\ndef menu():\n system(\"cls\")\n\n print(10*\"--\")\n print(f\"Main Menu\")\n print(10*\"--\")\n\n a = int(input('1) Add item\\n2) Select item\\n3) Show\\n5) Exit\\n>'))\n\n if a == 1:\n form() \n if a == 2:\n removeS(input(\"Select Title: \\n>\"))\n if a == 3:\n listAll()\n if a == 5:\n exit()\nif __name__ == \"__main__\":\n attributes = [\"Title\",\"Type\",\"Watched\",\"Link\"]\n data_base = \"movie_base.json\"\n try:\n f = open(data_base,\"r\")\n f = json.load(f)\n except:\n print(\"Error, please change data path.\")\n changePath()\n \n while True:\n menu()\n","repo_name":"MCEvergarden/Movies_Manager","sub_path":"movieList.py","file_name":"movieList.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"26228326260","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 19 20:16:39 2017\r\n\r\n@author: udaykamal\r\n\"\"\"\r\n\r\n\"\"\"\r\ncode for creating dataframe for challenge classifier model training\r\nhere we label challenges as below: (left=challenge type, right=class map)\r\n\r\n 00-0\r\n 01-1\r\n 02+07-2\r\n 03-3\r\n 04-4\r\n 05-5\r\n 06-6\r\n 08-7\r\n 09-8\r\n 10-9\r\n 11-10\r\n 12-11\r\n\r\nwe choose random 2000 frames from every challenge type. 
total=24000 samples of 12 class \r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport glob\r\n\r\nimreadpath='D:\\\\imwritepy\\\\'\r\ndf_save_path='D:\\\\dataframe\\\\challenge_detector_data.pkl'\r\n\r\nchallenge_type=np.array(['00','01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'])\r\n\r\ndf_all_img = pd.DataFrame()\r\n\r\nfor i in range (len(challenge_type)):\r\n if i==0:\r\n all_img=glob.glob(imreadpath+'*_*_00_00_00_*.jpg') #no challenge type\r\n else:\r\n all_img=glob.glob(imreadpath+'*_*_01_' + challenge_type[i] + '_03_*.jpg') #all other challenge type of level 3\r\n \r\n if i==5:\r\n all_img=all_img.append(glob.glob(imreadpath+'*_*_01_' + challenge_type[i] + '_05_*.jpg')) #for challenge type 5, we add level 05 samples as they are different than other levels\r\n \r\n if i==7: # as we considered 07 and 02 challenge type as one unified class labelled as 2\r\n lb=2\r\n \r\n elif i>7:\r\n lb=i-1\r\n \r\n else:\r\n lb=i\r\n \r\n all_label=np.full(len(all_img),lb).tolist() #creating the corresponding labels\r\n \r\n data = pd.DataFrame({'filepath': all_img, 'ch_type': all_label})\r\n data=data.sample(frac=2000/len(data)).reset_index(drop=True) #selecting random 2000 samples\r\n df_all_img=df_all_img.append(data,ignore_index=True)\r\n\r\ndf_all_img=df_all_img.sample(frac=1,random_state=200) #shuffling the whole dataframe\r\ndf_all_img.to_pickle(df_save_path) #saving the dataframe\r\n","repo_name":"udaykamal20/Team_Neurons","sub_path":"src/data_create/challenge_detector_model_dataframe_create.py","file_name":"challenge_detector_model_dataframe_create.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"24"} +{"seq_id":"72967350143","text":"__author__ = 'wenjiezeng@google.com (Wenjie Zeng)'\n\nfrom google.appengine.ext.webapp import template\n\nfrom gspeedometer import config\n\n\nclass Icon(object):\n \"\"\"Represent the resource needed to draw a Google Map icon.\"\"\"\n\n def __init__(self, icon_id='icon',\n image=config.DEFAULT_GOOGLEMAP_ICON_IMAGE, shadow='',\n icon_size=(15, 16), shadow_size=(18, 20),\n icon_anchor=(7, 16), info_window_anchor=(5, 1)):\n self.icon_id = icon_id\n # TODO(wenjiezeng): May want to check whether the resource exists.\n self.image = image\n self.shadow = shadow\n self.icon_size = icon_size\n self.shadow_size = shadow_size\n self.icon_anchor = icon_anchor\n self.info_window_anchor = info_window_anchor\n\n def __str__(self):\n return 'Icon ' % (\n self.icon_id, self.image, self.icon_size)\n\n\nclass Map(object):\n \"\"\"Represents the resource needed to draw a map.\"\"\"\n\n def __init__(self, map_id='map', width='500px', height='300px',\n center=(0, 0), zoom='2', show_navcontrols=True,\n show_mapcontrols=True, pointlist=None):\n # id of the html div component\n self.map_id = map_id\n # map div width\n self.width = width\n # map div height\n self.height = height\n # center (lat, long) of the view port\n self.center = center\n # zoom level of the map\n self.zoom = zoom\n # whether to show google map navigation controls\n self.show_navcontrols = show_navcontrols\n # whether to show toogle map type (sat/map/hybrid) controls\n self.show_mapcontrols = show_mapcontrols\n # point list\n self.points = pointlist or []\n\n def AddPoint(self, point):\n \"\"\"Add a point to the map.\"\"\"\n self.points.append(point)\n\n def __str__(self):\n return 'Map ' % (\n self.map_id, self.width, self.height, str(self.center))\n\n\nclass GoogleMapWrapper(object):\n 
\"\"\"A Python wrapper for Google Maps API.\"\"\"\n\n def __init__(self, key=None, themap=None, iconlist=None):\n # Set the appropriate Google Map key of yours\n self.key = key\n self.themap = themap or Map()\n self.icons = iconlist or []\n\n def AddIcon(self, icon):\n \"\"\"Add an icon as into the map resource so that points can reference it.\"\"\"\n self.icons.append(icon)\n\n def GetGoogleMapScript(self):\n \"\"\"Returns complete javacript for rendering map.\"\"\"\n template_args = {\n 'googlemap_key': self.key,\n 'map': self.themap,\n 'points': self._GetPointsScript(self.themap),\n 'icons': self.icons,\n 'center_lat': self.themap.center[0],\n 'center_lon': self.themap.center[1]\n }\n\n return template.render(\n 'templates/googlemaphelper.html', template_args)\n\n def _GetPointsScript(self, themap):\n if not themap.points:\n return '[]'\n\n script_list = ['[']\n\n # Constructs the points for the map\n for point in themap.points:\n script_list.append(\"[%f, %f, '%s', %s]\" % point)\n script_list.append(',')\n script_list = script_list[:-1]\n script_list.append('];\\n')\n js = ''.join(script_list)\n js = js.replace(\"u'\", \"'\")\n\n return js\n\n def __str__(self):\n iconlist = []\n for icon in self.icons:\n iconlist.append(str(icon))\n iconstr = ''.join(iconlist)\n\n return 'GoogleMapWrapper ' % (\n self.key, iconstr)\n","repo_name":"Mobiperf/MobiPerf","sub_path":"server/gspeedometer/helpers/googlemaphelper.py","file_name":"googlemaphelper.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"24"} +{"seq_id":"72425819902","text":"if __name__ == \"__main__\":\n#3.1\n\tx = 2\n\ty = 3\n\tif (x > y):\n\t result = x\n\telse:\n\t result = y\n#\tpowyższy kod jest poprawny\n\n#\tfor i in \"qwerty\": if ord(i) < 100: print (i)\n#\tpowyżysz kod nie jest poprawny, powinien wygładać tak:\n\tfor i in \"qwerty\":\n\t\tif ord(i) < 100:\n\t\t\tprint (i)\n\n\tfor i in \"axby\": print (ord(i)) if ord(i) < 100 else i\n#\tpowyższy kod jest poprawny\n\n#3.2\n\tL = [3, 5, 4]\n\tL = L.sort()\n\n\t#x, y = 1, 2, 3\n\t#poprawnie\n\tx, y, z = 1, 2, 3\n\n\tX = 1, 2, 3\n\t#cyfry trzeba zapisać w nawiasach kwadratowych\n\tX = [1, 2, 3]\n\tX[1] = 4\n\n\tX = [1, 2, 3]\n\t#X[3] = 4 - wykraczamy poza indeks bo liczymy od 0\n\n\tX = \"abc\"\n\t#X.append(\"d\") - napis nie posiada metody append\n\tX += \"d\"\n\n\t#map(pow, range(8))\n\t#poprawnie:\n\tmap(lambda x: pow(x,2), range(8))\n\n","repo_name":"kamilck13/Python2019","sub_path":"Zadanie 3/zad3_1,2.py","file_name":"zad3_1,2.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"2564990835","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 6 14:46:59 2023\r\n\r\n@author: Семья\r\n\"\"\"\r\nimport pandas as pd\r\nfrom datetime import datetime\r\nfrom stop_words import get_stop_words\r\n\r\n\r\ny_dict = {\r\n \"Зарплатные проекты\": 2,\r\n \"Бизнес-карта\": 1,\r\n \"Эквайринг\": 4,\r\n \"Открытие банковского счета\": 3,\r\n}\r\n\r\nstop_words = get_stop_words(\"ru\")\r\n\r\n\r\ndef decode_to_target(tab):\r\n return tab[\"ACTION_ITEM_RESULT_PRODUCT_NAME\"].apply(lambda x: y_dict[x])\r\n\r\n\r\ndef decode_to_true_date(tab):\r\n date_min = tab[\"date\"].min()\r\n return tab[\"date\"].apply(\r\n lambda x: (\r\n datetime.strptime(x, \"%Y-%m-%d\") - datetime.strptime(date_min, \"%Y-%m-%d\")\r\n ).days\r\n )\r\n\r\n\r\ndef get_aggregated_statistics_on_words_for_category(tab, 
category):\r\n tab_red = tab[tab[\"ACTION_ITEM_RESULT_PRODUCT_NAME\"] == category]\r\n large_text = \" \".join(tab_red[\"text\"].tolist())\r\n large_text = \" \".join(\r\n [word for word in large_text.split(\" \") if word not in stop_words]\r\n )\r\n aggtab = pd.DataFrame(large_text.split(\" \")).value_counts()\r\n return aggtab\r\n\r\n\r\ndef add_columns_for_catboost(tab, words):\r\n for word in words:\r\n try:\r\n tab[word] = tab[\"text\"].apply(lambda x: word in x.split(\" \"))\r\n tab[word] = tab[word].astype(int)\r\n except:\r\n try:\r\n tab[word] = 0\r\n except:\r\n print(\"miss\")\r\n return tab\r\n","repo_name":"AlexO28/test_assignment_on_nlp","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"9565468174","text":"from datetime import datetime\nimport backtrader as bt\n\n# bt.Strategy 클래스를 상속받아서 MyStrategy 클래스를 작성한다. \nclass MyStrategy(bt.Strategy):\n def __init__(self):\n # RSI 지표를 사용하려면 MyStrategy 클래스 생성자에서 RSI 지표로 사용할 변수를 저장한다. \n self.rsi = bt.indicators.RSI(self.data.close)\n # next() 메서드는 주어진 데이터와 지표(indicator)를 만족시키는 최소 주기마다 자동으로 호출된다. 시장에 참여하고 있지 않을 때 RSI가 30 미망니면 매수하고, 시장에 참여하고 있을 때 RSI가 70을 초과하면 매도하도록 구현한다. \n def next(self):\n if not self.position:\n if self.rsi < 30:\n self.order = self.buy()\n else:\n if self.rsi > 70:\n self.order = self.sell()\n\n# Cerebro 클래스는 백트레이더의 핵심 클래스로서, 데이터를 취합하고 백테스트 또는 라이브 트레이딩을 실행한 뒤 그 결과를 출력하는 기능을 담당한다. \ncerebro = bt.Cerebro()\ncerebro.addstrategy(MyStrategy)\n# 엔씨소프트(036570.KS)의 종가 데이터는 야후 파이낸스 데이터를 이용해서 취합한다. \ndata = bt.feeds.YahooFinanceData(dataname='036570.KS', fromdate=datetime(2017, 1, 1), todate=datetime(2019, 12, 1))\ncerebro.adddata(data)\n# 초기 투자 자금을 천만 원으로 설정한다. \ncerebro.broker.setcash(10000000)\n# 엔씨소프트 주식의 매매 단위는 30주로 설정한다. 보유한 현금에 비해 매수하려는 주식의 총 매수 금액(주가 * 매매 단위)이 크면 매수가 이루어지지 않음에 유의하자. \ncerebro.addsizer(bt.sizers.SizerFix, stake=30)\n\nprint(f'Initial Portfolio Value : {cerebro.broker.getvalue():,.0f} KRW')\n# Cerebro 클래스로 백테스트를 실행한다. \ncerebro.run()\nprint(f'Final Portfolio Value : {cerebro.broker.getvalue():,.0f} KRW')\n# 백테스트 결과를 차트로 출력한다. 
\ncerebro.plot()","repo_name":"yeonhodev/python_stock_trading","sub_path":"mySite/Investar/Backtrader_RSI.py","file_name":"Backtrader_RSI.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"32944605367","text":"from brownie import Token, accounts\n\ndef test_deploy():\n owner = accounts[0]\n # не знаю как у нас происходит определение, кто owner контракта в brownie, \n # поэтому по аналогии с hardhat пусть будет 1й кто получает подпись\n token = Token.deploy(1000)\n # не знаю как вызвать метод address у owner поэтому пока что закидываю в balanceOf объект, аналогично для других тестов \n balance_owner = token.balanceOf(owner)\n expected = 1000\n\n assert balance_owner == expected\n\ndef test_transfer():\n owner = accounts[0]\n user = accounts[1]\n token = Token.deploy(1000)\n # не знаю как у нас происходит определение, кто owner контракта в brownie, \n # поэтому по аналогии с hardhat пусть будет 1й аккаунт, кто получает подпись\n balance_owner_before = token.balanceOf(owner) \n expected_owner_before = 1000\n assert balance_owner_before == expected_owner_before\n\n balance_user_before = token.balanceOf(user) \n expected_user_before = 0\n assert balance_user_before == expected_user_before\n\n token.transfer(user, 400)\n\n balance_owner_after = token.balanceOf(owner) \n expected_owner_after = 600\n assert balance_owner_after == expected_owner_after\n\n balance_user_after = token.balanceOf(user) \n expected_user_after = 400\n assert balance_user_after == expected_user_after\n\ndef test_burn():\n owner = accounts[0]\n token = Token.deploy(1000)\n balance_owner_before = token.balanceOf(owner) \n expected_owner_before = 1000\n assert balance_owner_before == expected_owner_before\n # как коннектиться к функциям в hardhat просто connect(owner) и тд?\n token.burn(400)\n\n balance_owner_after = token.balanceOf(owner) \n expected_owner_after = 600\n assert balance_owner_after == expected_owner_after\n\n\n\n\n","repo_name":"IvanPsurtcev/ERC20-Brownie","sub_path":"tests/token-test.py","file_name":"token-test.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"29955732921","text":"import pandas as pd\n\nfrom services.inventory_control import InventoryMapping\nfrom services.menu_data import MenuData\nfrom collections import defaultdict\n\nDATA_PATH = \"data/menu_base_data.csv\"\nINVENTORY_PATH = \"data/inventory_base_data.csv\"\n\n\nclass MenuBuilder:\n def __init__(self, data_path=DATA_PATH, inventory_path=INVENTORY_PATH):\n self.menu_data = MenuData(data_path)\n self.inventory = InventoryMapping(inventory_path)\n\n def make_order(self, dish_name: str):\n try:\n curr_dish = [\n dish\n for dish in self.menu_data.dishes\n if dish.name == dish_name\n ][0]\n except IndexError:\n raise ValueError(\"Dish does not exist\")\n\n self.inventory.consume_recipe(curr_dish.recipe)\n\n def get_main_menu(self, restriction=None) -> pd.DataFrame:\n dish_table = defaultdict(list)\n\n for dish in self.menu_data.dishes:\n dish_restrictions = dish.get_restrictions()\n dish_ingredients = dish.get_ingredients()\n ingredients_are_available = (\n self.inventory.check_recipe_availability(dish.recipe)\n )\n\n if (\n restriction not in dish_restrictions\n and ingredients_are_available\n ):\n dish_table[\"dish_name\"].append(dish.name)\n dish_table[\"price\"].append(dish.price)\n 
dish_table[\"ingredients\"].append(dish_ingredients)\n dish_table[\"restrictions\"].append(dish_restrictions)\n\n return pd.DataFrame(dish_table)\n","repo_name":"ImVictorM/Restaurant-Orders","sub_path":"src/services/menu_builder.py","file_name":"menu_builder.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"37401545229","text":"from typing import List\n\nfloats: List[float] = [12.3554, 4.02, 5.777, 2.12, 3.13, 4.44, 11.0001]\nnames: List[str] = [\"Vanes\", \"Alen\", \"Jana\", \"William\", \"Richards\", \"Joy\"]\nnumbers: List[int] = [22, 33, 10, 6894, 11, 2, 1]\n\n# Создание новых списков с помощью map и lambda-функций\ncubed_rounded_floats = list(map(lambda x: round(x ** 3, 3), floats))\nfiltered_names = list(filter(lambda name: len(name) >= 5, names))\nproduct_of_numbers = int(eval(\"*\".join(map(str, numbers))))\n\nprint(cubed_rounded_floats) # [1928.712, 64.972, 193.463, 9.028, 29.839, 86.902, 1331.331]\nprint(filtered_names) # ['William', 'Richards', 'Joy']\nprint(product_of_numbers) # 68198352\n","repo_name":"RedStrikeRF/skillbox_python","sub_path":"2nd module/lesson 17/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"6013608500","text":"#!/usr/bin/env python3\n\nimport pygame as pg\n\nfrom constants import CODE_PIECES, TAILLE_CASE\nfrom pieces import Dame, Pion, Roi, Tour\nfrom themes import BLACK_CASE, PIECES, SELECTED_CASE, WHITE_CASE, COUP_POSSIBLE, PRISE_POSSIBLE\n\n\nclass Board:\n def __init__(self):\n self.plateau = [\n [14, 15, 16, 13, 12, 16, 15, 14],\n [11, 11, 11, 11, 11, 11, 11, 11],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [4, 5, 6, 3, 2, 6, 5, 4],\n ]\n self.piece_promotion = Dame\n self.piece_est_touchee = False\n self.piece_touchee = None\n self.update_pieces()\n\n def update_pieces(self):\n self.pieces = []\n for y in range(8):\n for x in range(8):\n if self.plateau[y][x]:\n if self.plateau[y][x] > 10:\n color = 2\n else:\n color = 1\n piece = CODE_PIECES[self.plateau[y][x] % 10]\n self.pieces.append(piece(self.plateau, x, y, color))\n\n def deplacer_si_possible(self, x, y):\n if self.coup_legal(self.piece_touchee, x, y):\n\n if isinstance(self.piece_touchee, Roi) and abs(x - self.piece_touchee.x) == 2:\n if x > self.piece_touchee.x:\n self.check_roque(self.piece_touchee, 0, deplacer=True)\n else:\n self.check_roque(self.piece_touchee, 1, deplacer=True)\n\n self.piece_touchee.deplacer(x, y)\n self.update_pieces()\n if isinstance(self.piece_touchee, Pion):\n self.check_promotion(self.piece_touchee)\n self.deselect()\n\n def coup_legal(self, piece, x, y):\n if (x, y) in piece.coups_possibles():\n est_legal = True\n old_plateau = [[case for case in ligne] for ligne in self.plateau]\n if self.piece_touchee != None:\n old_piece_touchee = self.piece_touchee.clone()\n else:\n old_piece_touchee = None\n\n # on vérifie si on peut roquer\n if isinstance(piece, Roi) and abs(x - piece.x) == 2:\n if x > piece.x:\n return self.check_roque(piece, 0, deplacer=False)\n return self.check_roque(piece, 1, deplacer=False)\n\n # ensuite, on anticipe le coup, on vérifie si le roi est en échec puis en replace la pièce et le plateau\n piece.deplacer(x, y)\n self.update_pieces()\n\n # si le roi est en échec\n roi = self.get_king(piece.color)\n if 
self.case_attaquee(roi.x, roi.y, roi.get_adverse()):\n est_legal = False\n\n for y in range(8):\n for x in range(8):\n self.plateau[y][x] = old_plateau[y][x]\n self.piece_touchee = old_piece_touchee\n self.update_pieces()\n\n return est_legal\n return False\n\n def check_promotion(self, piece: Pion):\n if piece.color == 1 and piece.y == 0:\n self.plateau[piece.y][piece.x] = self.piece_promotion.code\n elif piece.color == 2 and piece.y == 7:\n self.plateau[piece.y][piece.x] = self.piece_promotion.code + 10\n\n self.update_pieces()\n\n def case_attaquee(self, x, y, color):\n for attaquant in self.pieces:\n if attaquant.color == color and (x, y) in attaquant.coups_possibles():\n return True\n return False\n\n def check_roque(self, roi: Roi, direction, deplacer):\n # on vérifie que le cases du roi ne sont pas attaquées\n for i in range(0, 3):\n x, y = roi.horizontale(direction, i)\n if self.case_attaquee(x, y, roi.get_adverse()):\n return False\n\n # on vérifie qu'il y a bien une tour au bout de la rangée\n if direction == 0:\n x_tour = 7\n elif direction == 1:\n x_tour = 0\n tour = self.get_piece(x_tour, roi.y)\n\n if isinstance(tour, Tour) and tour.color == roi.color:\n if deplacer:\n tour.deplacer(*roi.horizontale(direction, 1))\n return True\n return False\n\n def select(self, x, y):\n if self.plateau[y][x]:\n self.piece_touchee = self.get_piece(x, y)\n self.piece_est_touchee = True\n else:\n self.deselect()\n\n def deselect(self):\n self.piece_touchee = None\n self.piece_est_touchee = False\n\n def draw(self, win):\n taille = TAILLE_CASE\n if self.piece_est_touchee:\n cp = [(x,y) for x,y in self.piece_touchee.coups_possibles() if self.coup_legal(self.piece_touchee, x, y)]\n else:\n cp = []\n\n for y in range(8):\n for x in range(8):\n if self.piece_touchee and (x, y) == (self.piece_touchee.x, self.piece_touchee.y):\n color = SELECTED_CASE\n elif (x + y) % 2 == 0:\n color = WHITE_CASE\n else:\n color = BLACK_CASE\n\n case = (x * taille, y * taille, taille, taille)\n pg.draw.rect(win, color, case)\n\n # desinner les pièces\n if self.plateau[y][x]:\n piece_image = PIECES[self.plateau[y][x]]\n piece_image = pg.transform.scale(piece_image, (taille, taille))\n win.blit(piece_image, (x * taille, y * taille))\n\n # dessiner les coups/prises possibles\n if (x, y) in cp:\n if self.plateau[y][x]:\n image = PRISE_POSSIBLE\n else:\n image = COUP_POSSIBLE\n image = pg.transform.scale(image, (taille, taille))\n win.blit(image, (x * taille, y * taille))\n\n def get_piece(self, x, y):\n for piece in self.pieces:\n if piece.x == x and piece.y == y:\n return piece\n return None\n\n def peut_jouer(self, color):\n for piece in self.pieces:\n if piece.color == color:\n self.select(piece.x, piece.y)\n if [(x, y) for x, y in self.piece_touchee.coups_possibles() if self.coup_legal(self.piece_touchee, x, y)]:\n self.deselect()\n return True\n self.deselect()\n return False\n\n def get_king(self, color):\n for piece in self.pieces:\n if isinstance(piece, Roi) and piece.color == color:\n return piece\n\n def get_color(self, x, y):\n if self.plateau[y][x] > 10:\n return 2\n return 1\n","repo_name":"YannlEspiegle/chess","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"26708679471","text":"# 스택/큐\n# https://programmers.co.kr/learn/courses/30/lessons/42583\n\ndef solution(bridge_length, weight, truck_weights):\n bridge_weights = 0\n bridge_trucks = [] #다리에 올라간 트럭\n 
elapsed_time = -1\n while True:\n #시간 경과\n elapsed_time += 1\n\n #다리를 건너는 트럭 진행\n for truck in bridge_trucks:\n truck[1] += 1\n if truck[1] > bridge_length-1:\n bridge_weights -= truck[0]\n bridge_trucks.pop()\n \n #현재 다리무게와 다음트럭의 무게의 합이 견딜 수 있는 무게보다 같거나 작은 경우\n if truck_weights and bridge_weights+truck_weights[0] <= weight and len(bridge_trucks) < bridge_length:\n bridge_weights += truck_weights[0]\n bridge_trucks.insert(0,[truck_weights[0], 0])\n del truck_weights[0]\n \n #모든 트럭이 통과했을 때 경과시간 반환\n if (not bridge_trucks) and bridge_weights == 0:\n return elapsed_time+1\n\nbridge_length = [\n 2,\n 100,\n 100\n]\n\nweight = [\n 10,\n 100,\n 100\n]\n\ntruck_weights = [\n [7,4,5,6],\n [10],\n [10,10,10,10,10,10,10,10,10,10]\n]\n\nresult = [\n 8,\n 101,\n 110\n]\n\nfor q in [0,1,2]:\n qid = solution(bridge_length[q], weight[q], truck_weights[q])\n if qid == result[q]:\n print(f'correct {qid}')\n else:\n print(f'incorrect {qid}')","repo_name":"westreed/ProgrammersAlgorithm","sub_path":"Programmers/Level2/다리를 지나는 트럭.py","file_name":"다리를 지나는 트럭.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"5614226712","text":"import media\r\nimport fresh_tomatoes\r\n\r\n# creation of each of my favorite movies from class movie defined on media.py\r\nrio = media.Movie(\"Rio\", \"A group of birds out of the african jungle\",\r\n \"https://upload.wikimedia.org/wikipedia/en/b/bb/Rio2011Poster.jpg\",\r\n \"https://www.youtube.com/watch?v=P1GRO31ve5Q\")\r\n\r\n\r\ncivil_war = media.Movie(\"Captain America: civil war\", \"Captain America against Iron Man\",\r\n \"https://upload.wikimedia.org/wikipedia/en/5/53/Captain_America_Civil_War_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=FkTybqcX-Yo\")\r\n \r\n\r\n\r\nstrange = media.Movie(\"Doctor Strange\", \"A medical with supernatural power\",\r\n \"https://upload.wikimedia.org/wikipedia/en/4/4f/Doctor_Strange_Vol_4_2_Ross_Variant_Textless.jpg\",\r\n \"https://www.youtube.com/watch?v=HSzx-zryEgM\")\r\n\r\nfurious_8 = media.Movie(\"Furious 8\", \"Dom with Cipher against his former teammates\",\r\n \"https://upload.wikimedia.org/wikipedia/en/2/2d/The_Fate_of_The_Furious_Theatrical_Poster.jpg\",\r\n \"https://www.youtube.com/watch?v=uisBaTkQAEs\")\r\n\r\n# List movies will hold all the movie objects above\r\nmovies = [rio, civil_war, furious_8, strange]\r\n\r\n\"\"\"Call to the function open_movies_page to display the web page with all the movies\r\nby passing my list of movies\"\"\"\r\n\r\nfresh_tomatoes.open_movies_page(movies)\r\n\r\n \r\n\r\n\r\n","repo_name":"tghil7/movie_trailer","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"15260991908","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom gpiozero import LED\nfrom time import sleep\n\nled = LED(25)\n\ndelay = 1\niterations = 50\nfor i in range(0, iterations-1):\n led.on()\n print('LED On #%i' % i)\n sleep(delay)\n led.off()\n print('LED Off #%i' % i)\n sleep(delay)\n ","repo_name":"chadondata/charpsberry_piduino","sub_path":"easy_raspberry_pi/blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"22350424954","text":"\"\"\"Torch modules for graph convolutions.\"\"\"\n# pylint: disable= no-member, 
arguments-differ, invalid-name\nimport torch as th\nfrom torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\nfrom dgl import function as fn\nimport numpy as np\nimport dgl.nn.pytorch as conv\n\n# pylint: enable=W0235\nclass GcapsConv(nn.Module):\n r\"\"\"Apply graph convolution over an input signal.\n\n Graph convolution is introduced in `GCN `__\n and can be described as below:\n\n .. math::\n h_i^{(l+1)} = \\sigma(b^{(l)} + \\sum_{j\\in\\mathcal{N}(i)}\\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})\n\n where :math:`\\mathcal{N}(i)` is the neighbor set of node :math:`i`. :math:`c_{ij}` is equal\n to the product of the square root of node degrees:\n :math:`\\sqrt{|\\mathcal{N}(i)|}\\sqrt{|\\mathcal{N}(j)|}`. :math:`\\sigma` is an activation\n function.\n\n The model parameters are initialized as in the\n `original implementation `__ where\n the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization\n and the bias is initialized to be zero.\n\n Notes\n -----\n Zero in degree nodes could lead to invalid normalizer. A common practice\n to avoid this is to add a self-loop for each node in the graph, which\n can be achieved by:\n\n >>> g = ... # some DGLGraph\n >>> g.add_edges(g.nodes(), g.nodes())\n\n\n Parameters\n ----------\n in_feats : int\n Input feature size.\n out_feats : int\n Output feature size.\n norm : bool, optional\n If True, the normalizer :math:`c_{ij}` is applied. Default: ``True``.\n bias : bool, optional\n If True, adds a learnable bias to the output. Default: ``True``.\n activation: callable activation function/layer or None, optional\n If not None, applies an activation function to the updated node features.\n Default: ``None``.\n\n Attributes\n ----------\n weight : torch.Tensor\n The learnable weight tensor.\n bias : torch.Tensor\n The learnable bias tensor.\n \"\"\"\n def __init__(self,\n in_feats,\n out_feats,\n num_gfc_layers=2,\n num_stats_in=1,\n num_stats_out=1,\n activation=None):\n super(GcapsConv, self).__init__()\n self._in_feats = in_feats\n self._out_feats = out_feats\n self._num_stats_in = num_stats_in\n self._num_stats_out = num_stats_out\n self._num_gfc_layers = num_gfc_layers\n self._activation_func = activation\n\n self._gin=conv.GINConv(None,'sum')\n self._stat_layers = nn.ModuleList()\n for _ in range(self._num_stats_out):\n gfc_layers = nn.ModuleList()\n curr_input_dim = self._in_feats * self._num_stats_in\n for _ in range(self._num_gfc_layers):\n gfc_layers.append(nn.Linear(curr_input_dim,self._out_feats))\n curr_input_dim = self._out_feats\n\n self._stat_layers.append(gfc_layers)\n\n\n \n def reset_parameters(self):\n \"\"\"Reinitialize learnable parameters.\"\"\"\n gain=nn.init.calculate_gain('relu')\n\n for i in range(self._num_stats_out):\n for j in range(self._num_gfc_layers):\n nn.init.xavier_normal(self._stat_layers[i][j].weight,gain=gain)\n\n\n def forward(self, graph, x_in):\n r\"\"\"Compute graph convolution.\n\n Notes\n -----\n * Input shape: :math:`(N, *, \\text{in_feats})` where * means any number of additional\n dimensions, :math:`N` is the number of nodes.\n * Output shape: :math:`(N, *, \\text{out_feats})` where all but the last dimension are\n the same shape as the input.\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : torch.Tensor\n The input feature\n\n Returns\n -------\n torch.Tensor\n The output feature\n \"\"\"\n\n graph = graph.local_var()\n\n norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)\n shp = norm.shape + (1,) * (x_in.dim() - 1)\n norm = th.reshape(norm, 
shp).to(x_in.device)\n x_in = x_in*norm\n \n x = x_in\n\n output = []\n for i in range(self._num_stats_out):\n out = self._gin(graph,x)\n for j in range(self._num_gfc_layers):\n out = self._stat_layers[i][j](out)\n out = self._activation_func(out)\n\n output.append(out) \n x = th.mul(x, x_in)\n\n \n output = th.cat(output,dim=-1)\n return output\n\n def extra_repr(self):\n \"\"\"Set the extra representation of the module,\n which will come into effect when printing the model.\n \"\"\"\n summary = 'in={_in_feats}, out={_out_feats}, p_in={_num_stats_in}, p_out={_num_stats_out}, gfc={_num_gfc_layers}'\n if '_activation' in self.__dict__:\n summary += ', activation={_activation}'\n if '_reducer' in self.__dict__:\n summary += ', reducer={_reducer}'\n\n return summary.format(**self.__dict__)\n\n","repo_name":"hellboy5/gcn_shock_graph","sub_path":"models/gcaps_conv.py","file_name":"gcaps_conv.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"24"} +{"seq_id":"18389399854","text":"import logging\nimport re\nimport typing\n\nfrom fastapi import Request, Response\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.types import ASGIApp\n\nfrom app.core.config import settings\nfrom app.helpers.enums import ContentTypeEnum\nfrom app.helpers.re import SPACES_AND_TAB_REGEX\n\nlogger = logging.getLogger(__name__)\n\nclass LogRequestMiddleware(BaseHTTPMiddleware):\n def __init__(self, app: ASGIApp) -> None:\n super().__init__(app)\n self.excluded_urls = [url for url in settings.LOGGING_EXCLUDED_URL.split(',') if url]\n\n async def dispatch(self, request: Request, call_next: typing.Awaitable[Response]):\n if settings.LOGGING_PARAMS_REQUEST_ENABLED and not self.path_is_in_excluded_urls(path=request.scope.get('path')):\n await self.log_request(request)\n response: Response = await call_next(request)\n return response\n \n async def log_request(self, request: Request):\n log_dict = {\n 'Method': request.method,\n 'Path': request.scope.get('path'),\n 'Params': request.query_params,\n }\n\n log_str = await self.build_log_str_from_dict(log_dict)\n logger.info(f\"Incoming request: {log_str}\")\n \n def path_is_in_excluded_urls(self, path: str):\n for url in self.excluded_urls:\n if url in path:\n return True\n return False\n\n async def build_log_str_from_dict(self, dict: dict):\n arr_log = [f\"{k}: {v if v else None}\" for k, v in dict.items()]\n return ', '.join(arr_log)\n\ndef get_content_type(dict: dict):\n for k, v in dict.items():\n if k.lower() == 'content-type':\n return v\n\nasync def log_request_body(request: Request):\n # Log request body in api_router instead of middleware.\n if settings.LOGGING_BODY_REQUEST_ENABLED:\n content_type = get_content_type(request.headers)\n if request.method.upper() != 'GET' and content_type == ContentTypeEnum.ApplicationJson.value:\n buff: bytes = await request.body()\n body: str = re.sub(SPACES_AND_TAB_REGEX, \"\", buff.decode('utf-8'))\n logger.info(f\"Request body: {body}\")\n","repo_name":"anhdhbn/fastapi-boilerplate","sub_path":"app/middlewares/log_request.py","file_name":"log_request.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"30016728485","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\n\nfrom mozio import views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('django.views.generic.simple',\n 
url(r'^$', views.index),\n url(r'^edit/', views.edit_polygons, name='edit'),\n url(r'^save/', views.save_polygons, name='save_polygons'),\n url(r'^get/', views.get_polygons, name='get_polygons'),\n url(r'^show/', views.search_map, name='show'),\n url(r'^find/', views.find_suppliers, name='find_suppliers'),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"dmitryax/mozio-test","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"24445383588","text":"from datetime import datetime, timedelta\n\nimport markdown\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.dateparse import parse_date\n\nfrom core.const import FOCUS_FACTOR\nfrom core.models import Bucket, DayCache, TimeSpan\nfrom core.utils import contrasting_text_color\n\n\ndef time_span_to_json(queryset, running_id=None):\n return [\n {\n 'start': instance.start,\n 'end': instance.get_end(),\n 'title': instance.bucket.title,\n 'color': instance.bucket.color,\n 'textColor': contrasting_text_color(instance.bucket.color),\n 'comment': markdown.markdown(instance.comment or ''),\n 'url': reverse('admin:core_timespan_change', args=(instance.id, )),\n 'bucket_url': reverse('admin:core_bucket_change', args=(instance.bucket.id, )),\n 'rendering': 'background' if instance.bucket.type == Bucket.CLIENTS else '',\n 'className': [\n 'current_event' if instance.id == running_id else '',\n 'small' if (instance.get_end() - instance.start).total_seconds() < 900 else '',\n ]\n } for instance in queryset\n ]\n\n\ndef time_span_list(request):\n start = request.GET.get('start')\n end = request.GET.get('end')\n assert start and end, 'missing GET params'\n # those fitting start-end and those in progress\n queryset = TimeSpan.objects.filter(\n Q(start__gte=start, end__lte=end) | Q(start__gte=start, end__isnull=True)\n )\n running = TimeSpan.objects.filter(bucket__type=Bucket.FOCUSED, end__isnull=True)\n data = time_span_to_json(queryset, running.get().id if running else None)\n return JsonResponse(data, safe=False)\n\n\ndef dashboard(request, start=None):\n running = TimeSpan.objects.filter(end__isnull=True)\n running_focused = running.filter(bucket__type=Bucket.FOCUSED)\n running_buckets = [o.bucket for o in running]\n\n return render(request, 'core/dashboard.html', {\n 'running': running_buckets,\n 'recent': Bucket.objects.filter(\n last_started__gte=(datetime.today() - timedelta(days=settings.RECENT_DAYS)),\n ).exclude(\n id__in=[_.id for _ in running_buckets]\n ).order_by('-last_started'),\n 'defaultDate': \"'%s'\" % start if start else 'null',\n 'title': running_focused.get().bucket.title if running_focused else '',\n 'FOCUS_FACTOR': FOCUS_FACTOR\n })\n\n\ndef toggle(request, title):\n running = TimeSpan.objects.filter(bucket__title=title, end__isnull=True)\n if running:\n return end_time_span(request, title)\n else:\n return start_time_span(request, title)\n\n\ndef start_time_span(request, id_):\n bucket = Bucket.objects.get(id=id_)\n if TimeSpan.objects.filter(end__isnull=True, bucket__type=Bucket.FOCUSED).count():\n return HttpResponse(\"focused task already in progress, can't have two\", status=400)\n TimeSpan.objects.create(start=datetime.now(), bucket=bucket)\n return HttpResponse('ok')\n\n\ndef end_time_span(request, id_):\n # FIXME: 
call it end_focused_task\n if id_:\n open_span = TimeSpan.objects.filter(bucket__id=id_).get(end__isnull=True)\n else:\n # or close latest focused\n open_span = TimeSpan.objects.filter(bucket__type=Bucket.FOCUSED).get(end__isnull=True)\n open_span.end = datetime.now()\n open_span.save()\n return HttpResponse('ok')\n\n\ndef insight(request, start, end):\n # FIXME: dirty hack, recalculate today so that progress bars show truth\n # Fix by treating today differently, skip cache\n DayCache.recalculate(datetime.today().date())\n\n return JsonResponse(data={\n str(o.date): o.data\n for o in DayCache.objects.filter(date__gte=parse_date(start), date__lte=parse_date(end))\n }, safe=False)\n\n\ndef tree(request):\n return render(request, 'core/tree.html')\n","repo_name":"bartekbrak/kinro","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"24"} +{"seq_id":"26102988305","text":"import win32gui as win32\nfrom tkintertemplate import *\nfrom pynputtemplate import on_press,on_release\nfrom tkinter import messagebox\n\nwindow.title(\"Input Display\")\ncurrent_window = (win32.GetWindowText(win32.GetForegroundWindow()))\ndesired_window = \"Input Display\"\nglobal x\nx = \"0\"\ny=\"0\"\ndef on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n window.destroy() \ndef exit_window():\n on_closing()\ndef tell_inputs():\n global y\n y = str(int(y)+1)\n tell_button_presses.config(text = y)\ndef keep_updating():\n window.mainloop()\n window.after(1,keep_updating)\n\ntell_button_presses = tk.Label(\ntext=x,\nfont=(25),\n)\nclose_button = tk.Button(\n text=\"Close\",\n command=exit_window,\n width=20,\n height=4,\n)\n\nclose_button.pack()\ntell_button_presses.pack()\nwindow.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\nwindow.mainloop()","repo_name":"happysmileyface/other","sub_path":"buttondisplay.py","file_name":"buttondisplay.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"39158992176","text":"from typing import List\n\nimport numpy as np\n\nfrom supervision.detection.core import Detections\n\n\ndef mock_detections(\n xyxy: List[List[float]],\n confidence: List[float] = None,\n class_id: List[int] = None,\n tracker_id: List[int] = None,\n) -> Detections:\n return Detections(\n xyxy=np.array(xyxy, dtype=np.float32),\n confidence=confidence\n if confidence is None\n else np.array(confidence, dtype=np.float32),\n class_id=class_id if class_id is None else np.array(class_id, dtype=int),\n tracker_id=tracker_id\n if tracker_id is None\n else np.array(tracker_id, dtype=int),\n )\n\n\ndef assert_almost_equal(actual, expected, tolerance=1e-5):\n assert abs(actual - expected) < tolerance, f\"Expected {expected}, but got {actual}.\"\n","repo_name":"roboflow/supervision","sub_path":"test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":7008,"dataset":"github-code","pt":"24"} +{"seq_id":"5460956070","text":"import math\nfrom os import system, name\nimport random\n\n# APP: 02\n# DATE: 20.02.2023\n# Description: create an app/game for guessing the random number, user can enter min and max value and\n# amount of available mistakes. 
After reaching maximum mistakes the game ends\n\nmin_number = 0\nmax_number = 0\nmistakes = 0\nuser_score = 0\ninvalid_input_text = 'Enter a valid number'\n\n\ndef generate_random_number(min, max):\n return random.randint(min, max)\n\nwhile mistakes <= 0:\n try:\n mistakes = math.ceil(float(input('Enter available mistakes: ')))\n except:\n print(invalid_input_text)\n\nwhile min_number <= 0:\n try:\n min_number = math.ceil(float(input('Enter min number: ')))\n except:\n print(invalid_input_text)\n\nwhile max_number <= min_number:\n try:\n max_number = math.ceil(float(input('Enter max number: ')))\n except:\n print(invalid_input_text)\n\nuser_number = -1\ntarget_number = -1\n\nwhile mistakes >= 0:\n\n if target_number == -1:\n target_number = generate_random_number(min_number, max_number)\n else:\n print('Mistakes left: ' + str(mistakes))\n\n user_number = math.ceil(float(input('Enter number: ')))\n\n if user_number == target_number:\n user_score += 1\n target_number = generate_random_number(min_number, max_number)\n print('Congrats, you guessed the number!')\n else:\n print('Wrong number')\n mistakes -= 1\n\nprint('Final score: ' + str(user_score))\n\n# tests\nassert generate_random_number(10, 100) in range(10, 100)\nassert generate_random_number(10, 100) not in range(0, 5)\n","repo_name":"Domino731/python_sandbox","sub_path":"guess_the_number/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"71482665341","text":"#!/usr/bin/python3\n\"\"\"A ALx TDD test for task 2\"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n \"\"\"Prints full name to screen\"\"\"\n if not isinstance(first_name, str):\n raise TypeError(\"first_name must be a string\")\n if not isinstance(last_name, str):\n raise TypeError(\"last_name must be a string\")\n print(f\"My name is {first_name} {last_name}\")\n\n\nif __name__ == \"__main__\":\n say_my_name()\n","repo_name":"Lemlem-yo/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"24"} +{"seq_id":"1141214099","text":"from collections import deque\n\n# 노드와 간선 개수 입력받기\nv, e = map(int, input().split())\n# 각 노드의 진입 차수를 기록할 리스트를 0으로 초기화\nindegree = [0] * (v + 1)\n# 각 노드에 연결된 간선 정보를 담기 위한 연결 리스트 (그래프) 초기화\ngraph = [[] for _ in range(v + 1)]\n\n# 방향 그래프의 모든 간선 정보를 입력받기\nfor _ in range(e):\n a, b = map(int, input().split())\n # a에서 b로 가는 간선\n graph[a].append(b)\n # 진입 차수를 1 증가\n indegree[b] += 1\n\ndef topology_sort():\n result = []\n q = deque()\n for i in range(1, v + 1):\n if indegree[i] == 0:\n q.append(i)\n \n while q:\n a = q.popleft()\n result.append(a)\n for b in graph[a]:\n indegree[b] -= 1\n if indegree[b] == 0:\n q.append(b)\n\n for i in result:\n print(i, end=\" \")\n\ntopology_sort()\n","repo_name":"beomseok-kang/Algorithms-Python","sub_path":"이코테/C10/topology_sort.py","file_name":"topology_sort.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"2926680088","text":"def greet(): #definition of function\n print('Hello') #prints when called\ngreet()\n\ndef greet():\n return 'Hello' #returns a value which can be assigned or printed when called\nprint(greet())\n\ndef greet():\n print('Hello')\nprint(greet())\na=greet()\nprint(a) 
#assigns none value\n\ndef greet(a,b): #function with defined arguments\n print('Hello,',a,'and',b)\ngreet('anmol','pardeep')\n\na=5\nprint('id of a',id(a))\ndef val(x):\n print('id of x',id(x)) #address of a and x is same\n x=10 #value changes in function\n print('id of x',id(x)) #adress of x is changed\n print('a ',x)\nval(a)\nprint('a ',a) #value of a outside function is unchanged\nprint('id of a',id(a)) #adress of a\n\na=[1,2]\ndef val(x):\n print(id(x))\n x[0]=0\n print(id(x)) #adress of variable is not changed as variable itself is same\n print(x)\n del x #does not delete the list a\nval(a)\nprint(a)\n\ndef a(x,*y): #will assign first value to x and rest of values to tuple y\n print(x,y[0],y[1]) #index shouldn't be out of range of arguments\na(1,1,2,3)\n\ndef a(x,*y):\n for i in y:\n print(x,i) #prints every index value\na(1,1,2,3)\n\n\ndef a(**x): #takes values as dict mapping\n print(x['fname'])\na(fname='anmol',lname='virk')\n\ndef a(**x): #takes values as dict mapping\n for i,j in x.items():\n print(i,j)\na(fname='anmol',lname='virk')\n\n#scope of variable\na,b,c=1,2,3\ndef fun():\n a=4 #local variable in function doesn't affect global a\n global d #defines a global variable\n d=5\n x=globals()['b'] #access the global variable outside of function\n globals()['c']=4 #changes value of global variable\n print('a',a)\n print('d',d)\n print('x',x)\nfun()\nprint('a',a)\nprint('b',b)\nprint('c',c)\nprint('d',d)\n\nlist=['anmol','pardeep','sandeep'] #list as argument\ndef fun(a): #function to count a and e\n x,y=0,0\n for i in a:\n x=x+i.count('a')\n y=y+i.count('e')\n return x,y\na,e=fun(list)\nprint('No. of a is {} and e is {}'.format(a,e))\n\n#function to create fibonacci series\nn=int(input(\"Enter the no. of terms: \"))\na0,a1=0,1\nwhile n<=0:\n n=int(input('Enter a number greater than 0: '))\nif n==1:\n print(a0)\nelse:\n print(a0,a1,end=' ')\n for i in range(2,n):\n c=a0+a1\n a0,a1=a1,c\n print(c, end=' ')\n\n#function for factorial\ndef fact(n):\n f=1\n for i in range(1,n+1):\n f=f*i\n return f\nx=int(input('Enter a number: '))\nprint(fact(x))\n\n#factorial using recursion\ndef fact(n):\n if n==0:\n return 1\n return n*fact(n-1)\nx=int(input('Enter a no.: '))\nprint(fact(x))\n\n#anonymous function lambda\nx=lambda a,b: a**b #any no. 
of arguments but only one expression\nprint(x(2,3))\n\n#filter,map and reduce\na=[1,2,3,4,5,6,7,8,9,0]\nevens=list(filter(lambda n : n%2==0, a)) #filter the elements for which argument function(lambda or predefined function) returns true\nprint(evens)\ndoubles=list(map(lambda n:2*n,evens)) #applies the function on elements and returns\nprint(doubles)\nfrom functools import reduce\nsum=reduce(lambda a,b:a+b,doubles)\nprint(sum)\n\n#decorators\ndef div(a,b):\n return a/b\ndef smart_div(func): #decorator function to modify another function\n def inner(a,b):\n if alist[j+1]:\n list[j],list[j+1]=list[j+1],list[j]\n\n\nlist=[9,3,2,1,6,5,0,8]\nsort(list)\nprint(list)\n\n# Function to sort a list with minimum position\ndef sort(list):\n for i in range(len(list)-1):\n minpos=i\n for j in range(i+1,len(list)):\n if list[minpos]>list[j]:\n minpos=j\n list[i],list[minpos]=list[minpos],list[i]\n\n\nlist=[9,3,2,1,6,5,0,8]\nsort(list)\nprint(list)","repo_name":"anmolvirk1997/myrep1","sub_path":"8.functions.py","file_name":"8.functions.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"74729673342","text":"import os\nimport numpy as np\nimport faiss\n\n\ndef get_index():\n \"\"\"\n Create and return a Faiss index based on embeddings stored in 'dataset/embeddings.npy'\n\n Parameters:\n None\n\n Returns:\n faiss.IndexFlatL2: a Faiss flat L2 index containing the loaded embeddings\n \"\"\"\n\n # Check if the embeddings file exists\n if os.path.exists('dataset/embeddings.npy'):\n # Load the embeddings\n embeddings = np.load('dataset/embeddings.npy')\n else:\n print('No existing embeddings found')\n\n # Create a Faiss index (a simple flat L2 index)\n index = faiss.IndexFlatL2(embeddings.shape[1])\n\n # Add vectors to the index\n index.add(embeddings)\n\n return index\n\ndef similarity_search(vec, k, index):\n \"\"\"\n Perform similarity search on a Faiss index\n\n Parameters:\n - vec (numpy.ndarray): the vector for which to search similar vectors\n - k (int): the number of nearest neighbors to retrieve\n - index (faiss.IndexFlatL2): the Faiss index to search\n\n Returns:\n tuple: a tuple containing two arrays, which are D (distances) and I (indices) of the k nearest neighbors\n \"\"\"\n\n # Search the index (D is distance, I is index of neighbors)\n D, I = index.search(vec, k)\n return D, I\n\ndef index_dataset(df):\n # Check if the embeddings file exists\n if os.path.exists('dataset/embeddings.npy'):\n # Load the embeddings\n embeddings = np.load('dataset/embeddings.npy')\n else:\n # Call the function to encode the dataset\n embeddings = encode_dataset(df)\n np.save('dataset/embeddings.npy', embeddings)\n\n # Create a Faiss index - here we use a simple flat L2 index\n index = faiss.IndexFlatL2(embeddings.shape[1])\n\n # Add vectors to the index\n index.add(embeddings)\n\n return index\n","repo_name":"K0EKJE/News-Recommendation","sub_path":"utils/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"8792698609","text":"import cv2\nfrom discordwebhook import Discord\nimport datetime\nimport copy\n\n# Discord Webhook URL\ndiscord = Discord(url=\"your Discord Webhook URL\")\n\ntemp = cv2.imread(\"./pic/temp.jpg\") #テンプレート画像\ntemp = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY) #グレイスケールに変換\n\ncapture = cv2.VideoCapture(0)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 
1280)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n\n# テンプレートマッチング\ndef template(img,img_color):\n h, w = temp.shape[0],temp.shape[1]\n match = cv2.matchTemplate(img, temp, cv2.TM_SQDIFF_NORMED)\n min_value, max_value, min_pt, max_pt = cv2.minMaxLoc(match)\n pt = min_pt\n temp_out = copy.deepcopy(img_color[pt[1]:pt[1]+h,pt[0]:pt[0]+w])\n # 決め打ち\n if 500 0:\n n //= p\n exponent += n\n return exponent\n\ndef main(n=2000_0000, k=1500_0000):\n total_sum = 0\n\n for p in primerange(1,n):\n exponent = legendre(n, p) - legendre(k, p) - legendre(n - k, p)\n total_sum += exponent * p\n\n return total_sum\n","repo_name":"sorrowise/euler","sub_path":"231.py","file_name":"231.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"24"} +{"seq_id":"18214027999","text":"# \"\"\"\n# The build/compilations setup\n#\n# >> pip install -r requirements.txt\n# >> python setup.py install\n# \"\"\"\nimport pip\nimport logging\nimport pkg_resources\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\ntry:\n install_reqs = _parse_requirements(\"requirements.txt\")\nexcept Exception:\n logging.warning('Fail load requirements file, so using default ones.')\n install_reqs = []\n\nsetup(\n name='jpt_tracker',\n version='0.1',\n author='David S Hayden',\n author_email='dshayden@mit.edu',\n license='MIT',\n description='Joint Posterior Multi-Object Tracker',\n packages=[\"jpt\"],\n scripts=[\n 'scripts/jpt_runPointTracker',\n 'scripts/jpt_evalX_conditional',\n 'scripts/jpt_evalZ_conditional',\n 'scripts/jpt_samples2matlab',\n 'scripts/jpt_drawDist_toy',\n 'scripts/jpt_run2dPointTracker',\n 'scripts/jpt_generate_confusion_dataset',\n 'scripts/jpt_evalMOT_point2d',\n 'scripts/jpt_plot_mot',\n 'scripts/jpt_samples2mot_point2d',\n 'scripts/jpt_evalX_conditionalAll',\n ],\n install_requires=install_reqs,\n dependency_links=[\n ],\n include_package_data=True,\n python_requires='>=3.6',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Scientific/Engineering :: Image Segmentation\",\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=\"tracking computer vision bayesian\",\n)\n","repo_name":"dshayden/jpt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"38400081305","text":"\"\"\"GDAL VRT writing classes and methods.\"\"\"\nimport itertools\nimport pickle\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nfrom typing import Sequence\nfrom xml.sax import saxutils\n\nimport xmlschema\nfrom osgeo import gdal, osr\n\nfrom . 
import schemas\nfrom .constants import VALID_SRS\n\ntry:\n import importlib.resources as pkg_resources\nexcept ImportError:\n # https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package\n import importlib_resources as pkg_resources\n\n\ndef get_vrt_schema():\n \"\"\"Load gdal VRT xml schema as xmlschema.XMLSchema object, used for encoding and decoding data structures.\n\n First try loading from pickle, otherwise parse from xsd file.\n\n Returns:\n (xmlschema.XMLSchema): Parsed schema object.\n \"\"\"\n try:\n with pkg_resources.open_binary(schemas, \"gdalvrt.pickle\") as f:\n schema = pickle.load(f)\n except:\n with pkg_resources.open_text(schemas, \"gdalvrt.xsd\") as f:\n schema = xmlschema.XMLSchema(f)\n\n return schema\n\n\ndef escape(s):\n \"\"\"Escape a string for embeding in xml. For example required for WKT strings.\"\"\"\n return saxutils.escape(s, entities={\"'\": \"'\", '\"': \""\"})\n\n\nclass VRTWriter:\n schema = get_vrt_schema()\n VRTDATASET_SUBCLASSES = (\"VRTWarpedDataset\", \"VRTPansharpenedDataset\")\n VRTRASTERBAND_SUBCLASSES = (\"VRTRawRasterBand\", \"VRTDerivedRasterBand\")\n REPEATABLE_ELEMENTS = (\"Metadata\", \"VRTRasterBand\", \"Overview\", \"SimpleSource\")\n REPEATABLE_ELEMENTS_KEY_FUNC = dict(\n zip(\n REPEATABLE_ELEMENTS,\n (\n lambda d: d[\"@domain\"],\n lambda d: d[\"@band\"],\n lambda d: d[\"SourceFilename\"],\n lambda d: d[\"SourceBand\"],\n ),\n )\n )\n VRTRASTERBAND_COLOR_INTERP = (\n \"Gray\",\n \"Palette\",\n \"Red\",\n \"Green\",\n \"Blue\",\n \"Alpha\",\n \"Hue\",\n \"Saturation\",\n \"Lightness\",\n \"Cyan\",\n \"Magenta\",\n \"Yellow\",\n \"Black\",\n \"Unknown\",\n )\n VRTRASTERBAND_PIX_FUNC = (\n \"real\",\n \"imag\",\n \"complex\",\n \"mod\",\n \"phase\",\n \"conj\",\n \"sum\",\n \"diff\",\n \"mul\",\n \"cmul\",\n \"inv\",\n \"intensity\",\n \"sqrt\",\n \"log10\",\n \"dB\",\n \"dB2amp\",\n \"dB2pow\",\n )\n\n def __init__(self):\n self.vrt = dict()\n\n def add_vrtdataset(self, xsize, ysize, subclass=None):\n \"\"\"Add the root element of a VRTDataset.\n\n Args:\n xsize (int): A positive integer describing total width in pixels of dataset.\n ysize (int): A positive integer describing total height in pixels of dataset.\n subclass (str, optional): Optional subclass attribute of VRTDataset. Valid\n values are `'VRTWarpedDataset'` or `'VRTPansharpenedDataset'`. Defaults to\n None.\n\n Raises:\n ValueError: If subclass has invalid value.\n \"\"\"\n if subclass is not None and subclass not in self.VRTDATASET_SUBCLASSES:\n raise ValueError(f\"Invalid subclass {subclass} for VRTDataset element.\")\n\n self.update_element(\n \"VRTDataset\",\n {\"@rasterXSize\": xsize, \"@rasterYSize\": ysize},\n {\"@subClass\": subclass},\n )\n\n def add_srs(self, srs=None, wkt=None, user_input=None, axis_mapping=None):\n \"\"\"Add SRS element to VRTDataset.\n\n Note:\n Wkt representations as strings will have special characters `<`, `>`, `&`,\n `'`, and `\"` escaped.\n Args:\n srs (osr.SpatialReference, optional): A SpatialReference object representing\n CRS of dataset. The wkt representation srs will be used in the VRTDataset.\n wkt (str, optional): Wkt representation of a CRS of a dataset.\n user_input (str, optional): Any valid input to osr.SetFromUserInput. Note that\n user_input is not escaped, so use srs or wkt for Wkt representations.\n axis_mapping (Sequence, optional): An optional attribute of SRS element.\n Describes mapping between data axis and CRS axis. 
If None, implies a\n GIS_TRADITIONAL_GIS_ORDER to CRS axis mapping strategy. Only valid in GDAL >=\n 3.\n\n Raises:\n ValueError: If srs is not a valid SpatialReference object.\n ValueError: If not at least one of srs, wkt, or user_input is defined.\n \"\"\"\n if srs and isinstance(srs, osr.SpatialReference):\n if not srs.Validate() == VALID_SRS:\n raise ValueError(\"srs is not a valid SpatialReference object.\")\n wkt = escape(srs.ExportToWkt())\n elif wkt and isinstance(wkt, str):\n wkt = escape(wkt)\n elif user_input and isinstance(user_input, str):\n pass\n else:\n raise ValueError(\n \"Invalid use of input arguments. One of srs, wkt, or user_input should be used.\"\n )\n\n if axis_mapping:\n if not isinstance(axis_mapping, Sequence):\n raise ValueError(\n \"axis_mapping should sequence of values mapping the axis order of CRS to axis order of coordinate transform metadata.\"\n )\n\n self.update_element(\n \"SRS\", {\"$\": wkt or user_input}, {\"@dataAxisToSRSAxisMapping\": axis_mapping}\n )\n\n def add_geotransform(self, geotransform):\n \"\"\"Add GeoTransform element to VRTDataset.\n\n Args:\n geotransform (Sequence): Geotransform for VRTDataset.\n\n Raises:\n ValueError: If geotransform is not a six element sequence of values.\n \"\"\"\n if not isinstance(geotransform, Sequence) or len(geotransform) != 6:\n raise ValueError(\"geotransform must be a six element sequence of values.\")\n\n self.update_element(\"GeoTransform\", {\"$\": \", \".join(map(str, geotransform))})\n\n def add_gcps(self, gcps, srs=None):\n if not isinstance(gcps, Sequence) and not isinstance(gcps[0], gdal.GCP):\n raise ValueError(\"gcps must be a Sequence of gdal.GCP objects.\")\n if srs and isinstance(srs, osr.SpatialReference):\n if not srs.Validate() == VALID_SRS:\n raise ValueError(\"srs is not a valid SpatialReference object.\")\n else:\n raise ValueError(\"srs is not a valid SpatialReference object.\")\n\n wkt = escape(srs.ExportToWkt())\n axis_mapping = (\n None\n if not hasattr(srs, \"GetDataAxisToSRSAxisMapping\")\n else \",\".join(map(str, srs.GetDataAxisToSRSAxisMapping()))\n )\n sub_element = {\"GCP\": []}\n for gcp in gcps:\n sub_element[\"GCP\"].append(\n {\n \"@Id\": gcp.Id,\n \"@Info\": gcp.Info,\n \"@Pixel\": gcp.GCPPixel,\n \"@Line\": gcp.GCPLine,\n \"@X\": gcp.GCPX,\n \"@Y\": gcp.GCPY,\n \"@Z\": gcp.GCPZ,\n }\n )\n\n self.update_element(\n \"GCPList\",\n sub_element,\n {\"@Projection\": wkt, \"@dataAxisToSRSAxisMapping\": axis_mapping},\n )\n\n def add_metadata(self, metadata, domain=None, band=None):\n if not isinstance(metadata, dict):\n raise ValueError(\"metadata must be a mapping of key value pairs\")\n if domain:\n if not isinstance(domain, str):\n raise ValueError(\"domain must be a string representing metadata domain\")\n if band:\n parent = list(\n filter(lambda r: r[\"@band\"] == band, self.vrt[\"VRTRasterBand\"])\n )\n if len(parent) == 1:\n parent = parent[0]\n else:\n raise ValueError(f\"Could not add metadata to band {band}.\")\n else:\n parent = None\n\n sub_element = {\"MDI\": []}\n if not domain:\n sub_element.update({\"@domain\": \"\"})\n else:\n sub_element.update({\"@domain\": domain})\n for key, val in metadata.items():\n sub_element[\"MDI\"].append({\"@key\": key, \"$\": val})\n\n self.update_element(\"Metadata\", sub_element, parent=parent)\n\n def add_vrtrasterband(self, band, dtype=\"Byte\", subclass=None):\n if subclass is not None and subclass not in self.VRTRASTERBAND_SUBCLASSES:\n raise ValueError(f\"Invalid subclass {subclass} for VRTRasterBand element.\")\n\n 
self.update_element(\n \"VRTRasterBand\",\n {\"@band\": band, \"@dataType\": dtype},\n {\"@subClass\": subclass},\n )\n\n def add_pixelfunc(self, band, func):\n if func not in self.VRTRASTERBAND_PIX_FUNC:\n raise ValueError(\"Unsupported pixel function.\")\n\n parent = self.get_band_element(band)\n\n self.update_element(\"PixelFunctionType\", {\"$\": func}, parent=parent)\n\n def add_colorinterp(self, band, interp=\"Gray\"):\n if interp not in self.VRTRASTERBAND_COLOR_INTERP:\n raise ValueError(\n f\"{interp} is not a valid color interp value. Valid values are {self.VRTRASTERBAND_COLOR_INTERP}\"\n )\n\n parent = self.get_band_element(band)\n\n self.update_element(\"ColorInterp\", {\"$\": interp}, parent=parent)\n\n def add_nodata(self, band, nodata=None, hide=False):\n if isinstance(nodata, str):\n nodata = \"nan\"\n if isinstance(nodata, int):\n nodata = float(nodata)\n if not isinstance(nodata, (int, float, str)):\n raise ValueError(f\"NoDataValue must be a double or NaN type value.\")\n\n parent = self.get_band_element(band)\n\n self.update_element(\"NoDataValue\", {\"$\": nodata}, parent=parent)\n\n if hide:\n self.update_element(\n \"HideNoDataValue\", {\"$\": 1 if hide else 0}, parent=parent\n )\n\n def add_colortable(self, band, colors):\n if not (isinstance(colors, Sequence) and len(colors[0]) in (3, 4)):\n raise ValueError(f\"ColorTable must be a sequence of RGB/RGBA tuples\")\n parent = self.get_band_element(band)\n\n sub_element = {\"Entry\": []}\n for color in colors:\n sub_element[\"Entry\"].append(\n {f\"@c{i}\": c for i, c in enumerate(color, start=1)}\n )\n\n self.update_element(\"ColorTable\", sub_element, parent=parent)\n\n def add_description(self, band, desc):\n parent = self.get_band_element(band)\n\n self.update_element(\"Description\", {\"$\": desc}, parent=parent)\n\n def add_unittype(self, band, unittype=\"m\"):\n if unittype not in (\"m\", \"ft\"):\n raise ValueError(\"Invalid UnitType value. 
Valid values are 'm' or 'ft'.\")\n\n parent = self.get_band_element(band)\n\n self.update_element(\"UnitType\", {\"$\": unittype})\n\n def add_offset(self, band, offset=0.0):\n parent = self.get_band_element(band)\n\n self.update_element(\"Offset\", {\"$\": offset}, parent=parent)\n\n def add_scale(self, band, scale=1.0):\n parent = self.get_band_element(band)\n\n self.update_element(\"Scale\", {\"$\": scale}, parent=parent)\n\n def add_overview(self, band, source_filename, source_band, relative=False):\n parent = self.get_band_element(band)\n\n sub_element = {\n \"SourceFilename\": {\n \"@relativeToVRT\": 1 if relative else 0,\n \"$\": source_filename,\n },\n \"SourceBand\": {\"$\": str(source_band)},\n }\n\n self.update_element(\"Overview\", sub_element, parent=parent)\n\n def add_categorynames(self, band):\n raise NotImplementedError\n\n def add_rasterattrtable(self, band):\n raise NotImplementedError\n\n def add_source(\n self,\n band,\n source_filename,\n source_band,\n type=\"Simple\",\n src_xsize=None,\n src_ysize=None,\n src_dtype=None,\n src_block_xsize=None,\n src_block_ysize=None,\n src_win_xoff=None,\n src_win_yoff=None,\n src_win_xsize=None,\n src_win_ysize=None,\n dst_win_xoff=None,\n dst_win_yoff=None,\n dst_win_xsize=None,\n dst_win_ysize=None,\n relative=False,\n shared=True,\n open_options=None,\n ):\n parent = self.get_band_element(band)\n\n sub_element = {\n \"SourceFilename\": {\n \"@relativeToVRT\": 1 if relative else 0,\n \"@shared\": \"1\" if shared else \"0\",\n \"$\": source_filename,\n },\n \"SourceBand\": {\"$\": str(source_band)},\n }\n\n if any((src_xsize, src_ysize, src_block_xsize, src_block_ysize, src_dtype)):\n sub_element.update(\n {\n \"SourceProperties\": {\n \"@RasterXSize\": src_xsize,\n \"@RasterYSize\": src_ysize,\n \"@DataType\": src_dtype,\n \"@BlockXSize\": src_block_xsize,\n \"@BlockYSize\": src_block_ysize,\n }\n }\n )\n\n if any((src_win_xoff, src_win_yoff, src_win_xsize, src_win_ysize)):\n sub_element.update(\n {\n \"SrcRect\": {\n \"@xOff\": src_win_xoff,\n \"@yOff\": src_win_yoff,\n \"@xSize\": src_win_xsize,\n \"@ySize\": src_win_ysize,\n }\n }\n )\n if any((dst_win_xoff, dst_win_yoff, dst_win_xsize, dst_win_ysize)):\n sub_element.update(\n {\n \"DstRect\": {\n \"@xOff\": dst_win_xoff,\n \"@yOff\": dst_win_yoff,\n \"@xSize\": dst_win_xsize,\n \"@ySize\": dst_win_ysize,\n }\n }\n )\n\n if open_options:\n sub_element.update(\n {\n \"OpenOptions\": {\n \"OOI\": [\n {\"@key\": key, \"$\": val} for key, val in open_options.items()\n ]\n }\n }\n )\n\n self.update_element(f\"{type}Source\", sub_element, parent=parent)\n\n def to_string(self):\n \"\"\"Return a string representation of VRTDataset.\"\"\"\n return ET.tostring(self.schema.encode(self.vrt))\n\n def to_file(self, path):\n \"\"\"Write VRTDataset to file.\n\n Args:\n path (str or Pathlike): Path to write VRTDataset to.\n \"\"\"\n path = Path(path)\n with path.open(\"wb\") as f:\n f.write(self.to_string())\n\n def get_band_element(self, band):\n element = next(filter(lambda d: d[\"@band\"] == band, self.vrt[\"VRTRasterBand\"]))\n if not element:\n raise ValueError(\n f\"VRTRasterBand corresponding to index {band} does not exist.\"\n )\n\n return element\n\n def update_element(self, element, mapping, optional_mapping=None, parent=None):\n \"\"\"Update element of `vrt` corresponding to a valid VRT xml element. 
Will overwrite element if already exists.\n\n Args:\n element (str): Name of VRT element to update.\n mapping (dict): A dict containing required attributes and subelements of `element`.\n optional_mapping (dict): A dict containing optional attributes of `element`.\n Any key-value pairs will be added to the `element` provided the value is not\n None.\n \"\"\"\n if optional_mapping:\n optional_mapping = {\n key: value for key, value in optional_mapping.items() if value\n }\n else:\n optional_mapping = dict()\n\n if parent:\n node = parent\n else:\n node = self.vrt\n\n if element in self.REPEATABLE_ELEMENTS:\n d = []\n if element in node:\n n = len(node[element])\n touched = False\n for key, group in itertools.groupby(\n node[element], key=self.REPEATABLE_ELEMENTS_KEY_FUNC[element]\n ):\n if key in mapping.values():\n d.append({**mapping, **optional_mapping})\n touched = True\n else:\n d.append(list(group)[0])\n if len(d) == n and not touched:\n d.append({**mapping, **optional_mapping})\n else:\n d.append({**mapping, **optional_mapping})\n else:\n d = {**mapping, **optional_mapping}\n\n if element == \"VRTDataset\":\n node.update(d)\n else:\n node.update({element: d})\n\n @property\n def is_valid(self):\n try:\n self.schema.encode(self.vrt)\n return True\n except xmlschema.XMLSchemaEncodeError:\n return False\n\n def clear(self):\n \"\"\"Clear VRTDataset contents.\"\"\"\n self.vrt = dict()\n","repo_name":"underchemist/vrt","sub_path":"vrt_writer/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":17159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"24"} +{"seq_id":"74717840379","text":"#! python3\n\"\"\"\nAuthor: Joe DeFilippo\n1. Captures the current main page deals from slickdeals.net using BeautifulSoup\n2. Allows the user to maintain a persistent 'wishlist' of items/keywords to compare against the current deals\n3. The user can also perform custom queries against the current deals.\n\"\"\"\n\nimport logging, bs4, requests, webbrowser, shelve\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - - %(levelname)s - %(message)s')\n\n\ndef setupWishlist():\n \"\"\"\n Checks to see if the user already has a wishlist set up/saved on their filesystem.\n If yes, that wishlist is loaded, otherwise a new, blank wishlist is created as a set\n \"\"\"\n try:\n shelfFile = shelve.open('wishlist')\n wishlist = set(shelfFile['wishlist'])\n shelfFile.close()\n logging.info('Shelf file opened. Wishlist loaded.')\n except:\n wishlist = set()\n logging.info('Shelf file does not exist. 
Wishlist is blank.')\n return wishlist\n\ndef getDeals():\n \"\"\"\n Uses BeautifulSoup to get all anchor tags from slickdeals.net\n Any anchor that has a title is a deal\n The function returns a dictionary of deals with the title of the deals as the key, and the URL as the value\n \"\"\"\n deals = {}\n res = requests.get('http://slickdeals.net')\n res.raise_for_status()\n slickSoup = bs4.BeautifulSoup(res.text, features='lxml')\n elems = slickSoup.find_all('a')\n\n for i in range(0, len(elems)):\n if elems[i].get('title') != None:\n if elems[i].get('title') not in deals.keys():\n slickDeal = elems[i].get('title')\n deals[slickDeal] = 'http://www.slickdeals.net' + elems[i].get('href')\n return deals\n\ndef outputDealsToHTML(deals, outFile):\n \"\"\"\n Takes the dictionary containing deals and a file name and generates an HTML file which contains a numbered list\n of deals\n \"\"\"\n sdealshtml = open(outFile, 'w')\n sdealshtml.write('\\n\\nSlickDealer\\n\\n')\n sdealshtml.write('\\n
<ol>\n')\n    for k in deals.keys():\n        if k is not '':\n            sdealshtml.write('\\t<li><a href=\"' + deals[k] + '\">' + k + '</a></li>\\n')\n\n    sdealshtml.write('</ol>\\n</body>\\n</html>
    \\n\\n')\n sdealshtml.close()\n logging.info('HTML file ' + outFile + ' written and closed')\n\ndef displayMenu(menu, menuPath):\n \"\"\"\n Prints a menu to the console (Ex main menu, wishlist maintenance menu, etc.)\n \"\"\"\n print(menuPath)\n print('---------')\n for k in menu.keys():\n print(k + ' ' + menu[k])\n\ndef saveWishlistToShelf(shelf_file, wishlist):\n \"\"\"\n Uses the shelve module to save the current wishlist to the user's local drive\n \"\"\"\n shelfFile = shelve.open(shelf_file)\n shelfFile[shelf_file] = set(wishlist)\n shelfFile.close()\n logging.info('Wishlist saved to shelf.')\n\ndef scrubHTMLfromString(string_to_clean):\n \"\"\"\n Removes HTML special character formatting from a string and returns the clean string\n \"\"\"\n string_to_clean = string_to_clean.replace('(', '(')\n string_to_clean = string_to_clean.replace(')', ')')\n string_to_clean = string_to_clean.replace('"', '\\\"')\n string_to_clean = string_to_clean.replace(''', '\\'')\n string_to_clean = string_to_clean.replace('?', '?')\n string_to_clean = string_to_clean.replace('#', '#')\n string_to_clean = string_to_clean.replace('!', '!')\n string_to_clean = string_to_clean.replace('$', '$')\n string_to_clean = string_to_clean.replace('%', '%')\n string_to_clean = string_to_clean.replace('&', '&')\n string_to_clean = string_to_clean.replace(':', ':')\n string_to_clean = string_to_clean.replace('.', '.')\n string_to_clean = string_to_clean.replace(',', ',')\n string_to_clean = string_to_clean.replace('@', '@')\n string_to_clean = string_to_clean.replace('/', '/')\n\n return string_to_clean\n\ndef displayWishlist(wlist):\n \"\"\"\n Prints the wishlist to the console\n \"\"\"\n if len(wlist) > 0:\n print(wlist)\n else:\n print('Wishlist is empty')\n\ndeals = getDeals()\nwishlist = setupWishlist()\n\nmain_menu = {'1.': 'View current deals in browser',\n '2.': 'Search Deals for Wishlist Items',\n '3.': 'Display My Wishlist',\n '4.': 'Wishlist Maintenance',\n '5.': 'Search Deals by Keyword',\n 'Q.': 'Quit'}\n\nwl_maintenance_menu = {'1.': 'Add item to Wishlist',\n '2.': 'Remove Item from Wishlist',\n '3.': 'Display My Wishlist',\n '4.': 'Back to Main Menu'}\n\noutFile = 'slickdealer.html'\noutputDealsToHTML(deals, outFile)\n\nwhile True:\n displayMenu(main_menu, 'Slick Dealer - Main Menu')\n menuSelection = input('Enter your selection---> ').lower()\n if menuSelection == 'q': # quit the program\n print('Thanks for using Slick Dealer. Goodbye!')\n quit()\n if menuSelection == '1': #show all current deals in browser\n print('Opening in default browser...')\n webbrowser.open(outFile)\n if menuSelection == '2': #search deals for wishlist items\n print('\\n')\n for item in wishlist:\n for k in deals:\n if item in k.lower():\n print('Wishlist item ' + item + ' found! --->' + scrubHTMLfromString(k) + ': ' + deals[k])\n print('\\n')\n\n if menuSelection == '3':\n displayWishlist(wishlist)\n if menuSelection == '4':\n while True:\n displayMenu(wl_maintenance_menu, 'Wishlist Maintenance')\n wl_maint_sel = input('Enter your selection---> ').lower()\n if wl_maint_sel == '1':\n addMore = 'y'\n while addMore == 'y':\n newItem = input('What keyword would you like to add to your wishlist?')\n wishlist.add(newItem.lower())\n\n displayWishlist(wishlist)\n addMore = input('Add another item? 
(y/n)').lower()\n saveWishlistToShelf('wishlist', wishlist)\n if wl_maint_sel == '2':\n removeMore = 'y'\n while removeMore == 'y' and len(wishlist) > 0:\n item_to_remove = input('What keyword would you like to remove from your wishlist?')\n try:\n wishlist.remove(item_to_remove)\n displayWishlist(wishlist)\n saveWishlistToShelf('wishlist', wishlist)\n except:\n print('Item not in wishlist')\n removeMore = input('Remove another item? (y/n)').lower()\n if wl_maint_sel == '3':\n displayWishlist(wishlist)\n if wl_maint_sel == '4':\n break\n\n if menuSelection == '5': # search keyword\n keep_searching = 'y'\n while keep_searching == 'y':\n keyword = input('Enter your keyword---> ').lower()\n if keyword in str(deals.keys()).lower():\n print('\\n')\n for k in deals:\n if keyword in k.lower():\n print(scrubHTMLfromString(k) + ': ' + deals[k])\n else:\n print('No deals for the given keyword.')\n print('\\n')\n keep_searching = input('Do you want to keep searching? (y/n)').lower()","repo_name":"joedefilippo/slickdealer","sub_path":"slickdealer.py","file_name":"slickdealer.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"38356253110","text":"while True:\n\ttry:\n\t\tfor _ in range(int(input())):\n\t\t\tn = int(input())\n\t\t\tTemp,DP = {},{}\n\t\t\tinputs = []\n\t\t\tfor _ in range(n):\n\t\t\t\tb,num = input().split()\n\t\t\t\tinputs.append((int(b),num))\n\t\t\tinputs = list(set(inputs))\n\t\t\tn = len(inputs)\n\t\t\tfor i in inputs:\n\t\t\t\tTemp = {}\n\t\t\t\tif i[0] == -1:\n\t\t\t\t\tminbase = max(i[1])\n\t\t\t\t\tminbase = max(int(minbase,36) + 1,2)\n\t\t\t\t\t#print('minbase ',i[1],minbase)\n\t\t\t\t\tfor j in range(minbase,37):\n\t\t\t\t\t\t#print( int(i[1], j))\n\t\t\t\t\t\tif int(i[1],j) not in Temp:\n\t\t\t\t\t\t\tTemp [int(i[1],j)] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tTemp [int(i[1],j)] += 1\n\t\t\t\t\t\t#print(Temp)\n\t\t\t\t\tfor k,v in Temp.items():\n\t\t\t\t\t\tif k not in DP:\n\t\t\t\t\t\t\tDP[k] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tDP[k] += 1\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1], i[0])not in DP:\n\t\t\t\t\t\tDP[ int(i[1],i[0]) ] = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tDP[ int(i[1],i[0]) ] += 1\n\n\t\t\tGotAns = False\n\t\t\t#print(DP)\n\t\t\tDP = sorted(DP.items(),key = lambda x : x[0])\n\t\t\tfor k,v in DP:\n\t\t\t\tif v == n:\n\t\t\t\t\tif k > 1000000000000:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tGotAns = True\n\t\t\t\t\t\tprint(k)\n\t\t\t\t\t\tbreak\n\t\t\tif not GotAns:\n\t\t\t\tprint(-1)\n\n\texcept:\n\t\tbreak","repo_name":"CapnSK/Competitive_Programming","sub_path":"CODECHEF/LONG CHALLENGES/2019/OCT19 LONG CHALLENGE/6_MSNG/dHANESHmsng.PY","file_name":"dHANESHmsng.PY","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"41840502432","text":"import tensorflow as tf\nimport _pickle as pk\nimport numpy as np\nfrom src.helper_funcs import save_model, visualize_embeddings\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport os\nimport sys\n\n\n# Path to input data.\ndata_in = 'C:/Users/admin/PycharmProjects/dictionary_bootstrapping_Byungkon_Kyung/simple-data.pkl'\nwith open(data_in, 'rb') as f:\n d = pk.load(f, encoding='latin1')\n\n# Path to tensorboard output files\nlog_dir = 'C:/Users/admin/PycharmProjects/tensorboard_output'\nmodels_dir = 'C:/Users/admin/PycharmProjects/models'\n\n# Path to embeddings initializations\n# init_embedd_dir = 
'C:/Users/admin/PycharmProjects/NLP/embeddings_initialization_dim_200.npy'\n\n# uncomment the following line if you want to use pre-trained embeddings\n# embeddings_init = np.load(init_embedd_dir)\n\nbs = 64 # mini-batch size\ntd = 150 # embedding dimension\nhd = 300 # hidden dimension\n\nnw, mw, ms = d['def'].shape # total number of words, max num. of words per definition, max num. of senses per word\n# nw = len(d['id2dw'])\nparams = {} # hash to hold the trainable parameters\ntau = 10\n# We need the following 3 lines to account for the discrepancy between\n# the true number of words (true_nw) vs. the number of words w/ IDs (nw).\n# This discrepancy exists because of some designs choices that have not been modified.\ntrue_nw = nw\nmaxid = np.max(d['def'])\nif maxid >= nw:\n nw = maxid + 1\n\"\"\"\n Input placeholders and constant values\n\"\"\"\n# placeholders\ndf = tf.placeholder(name='def', dtype=tf.int32, shape=[None, mw, ms]) # Takes values from d['def']\ndm = tf.placeholder(name='dmask', dtype=tf.float32, shape=[None, mw, ms]) # Takes values from d['dmask']\nwmask = tf.placeholder(name='wmask', dtype=tf.float32, shape=[None, mw]) # Takes values from d['wmask']\nh_d = tf.placeholder(name='idf', dtype=tf.float32, shape=[None, mw]) # Takes values from d['idf']\nwi = tf.placeholder(name='wi', dtype=tf.int32) # batch of word indices\n# nwi = tf.placeholder(name='nwi', dtype=tf.int32) # batch of word indices (negative samples- if Hinge Loss is used)\npr = tf.placeholder(name='sprior', dtype=tf.float32, shape=[None, mw, ms])\nlr = tf.placeholder(name='lr', dtype=tf.float64) # learning rate\nbeta = tf.placeholder(name='beta', dtype=tf.float32) # beta for fixed point iteration\n\n# constants\n# NO constants for now\n\n\"\"\"\n Non-trainable parameters\n\"\"\"\nparams['dwe'] = tf.get_variable(name='dwe',\n shape=(nw, td),\n dtype=tf.float32,\n initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1),\n trainable=False) # disambiguated word embedding shape=(nw, td)\n\n\"\"\"\n Trainable parameters\n\"\"\"\n# params['L'] = tf.get_variable('L', shape=(td, ), dtype=tf.float32, initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1)) # td: diagonal entries only (of the td x td matrix)\nparams['L1'] = tf.get_variable('L1', shape=(td, hd), dtype=tf.float32, initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1)) # td x hd\nparams['L2'] = tf.get_variable('L2', shape=(hd, td), dtype=tf.float32, initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1)) # hd x td\n# params['b1'] = tf.get_variable('bias_1', shape=(hd,), dtype=tf.float32, initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1))\n# params['b2'] = tf.get_variable('bias_2', shape=(td,), dtype=tf.float32, initializer=tf.initializers.random_uniform(minval=-0.1, maxval=+0.1))\n\n\"\"\"\n Word vectors gathering\n\"\"\"\nwith tf.name_scope('Gather_WEs'):\n # ndw = tf.gather(params['dwe'], nwi, name='neg_samp') # bs x td (negative samples)\n pdw = tf.gather(params['dwe'], wi, name='pos_samp') # bs x td (positive samples)\n\n\"\"\"\n Expanding tensors to help with operations below \n\"\"\"\nwith tf.name_scope('Expand_dims'):\n wm = tf.expand_dims(wmask, axis=2) # bs x mw x 1\n idf = tf.expand_dims(h_d, axis=2) # bs x mw x 1\n\n\n\"\"\"\n Return alpha coefficients\n\"\"\"\nwith tf.name_scope('Alpha_coeffs'):\n\n def calc_alphas(_, args_):\n \"\"\"\n Returns: alpha coefficients of size mw x ms.\n d: indices of the senses that comprise the definition (mw x ms)\n m: mask of size mw x ms\n 
\"\"\"\n d, m = args_\n\n senses = tf.gather(params['dwe'], d) # mw x ms x td\n senses_norm = tf.nn.l2_normalize(senses, axis=2) # mw x ms x td, normalized vectors\n cos_sim = tf.reduce_sum(tf.reshape(senses_norm, [mw, ms, 1, 1, td]) * senses_norm, axis=4) # mw x ms x mw x ms\n\n # sum the similarities\n logits = tf.reduce_sum(cos_sim, axis=3) # mw x ms x mw\n\n # calculate the tau / |s(d_m)|\n cnt = tf.reshape(tf.reduce_sum(m, axis=1), [1, 1, mw], name='cnt') # 1 x 1 x mw (In other words, |s(d_m)|)\n logits = logits / cnt # mw x ms x mw\n\n # take rid of the NaN values generated from the above operation\n logits = tf.where(tf.is_nan(logits), tf.zeros_like(logits), logits) # mw x ms x mw\n\n # exponentiate and calculate product of context words probs\n logits = tf.exp(tau * logits) # mw x ms x mw\n logits = tf.reduce_prod(logits, axis=2) # mw x ms\n\n # here smooth the average of all senses\n sm = tf.reduce_sum(logits * m, axis=1, keepdims=True) # mw x 1\n logits = (logits * m) / sm # mw x ms\n logits = tf.where(tf.logical_or(tf.is_nan(logits), tf.is_inf(logits)), tf.zeros_like(logits), logits)\n\n return logits\n\n\n alphas = tf.scan(calc_alphas, [df, dm], initializer=tf.zeros(shape=[mw, ms]), name='alphas') # bs x mw x ms\n\n\"\"\"\n Calculate the double convex combination of senses of all plain words for a given definition and pass the resulting\n output to a 2-layer NN to get the new embedding\n\"\"\"\nwith tf.name_scope('Convex_comb_senses'):\n\n raw_emb = tf.reduce_sum(tf.expand_dims(alphas, axis=3) * tf.gather(params['dwe'], df), axis=2, name='senses_sum')\n e_i = tf.reduce_sum((raw_emb * idf) * wm, axis=1, name='pl_words_sum') # bs x td\n\nwith tf.name_scope(\"2_Layer_NN\"):\n\n new_emb = tf.tanh(tf.matmul(e_i, params['L1'], name='L1')) # bs x hd\n new_emb = tf.matmul(new_emb, params['L2'], name='L2') # bs x td, after passing through a 2-layer network\n\n\n\"\"\"\n Loss for regression\n\"\"\"\nwith tf.name_scope(\"Loss_calc\"):\n # Uncomment the following for NORMALIZATION\n # new_emb_norm = tf.nn.l2_normalize(new_emb, axis=1)\n # pdw_norm = tf.nn.l2_normalize(pdw, axis=1)\n\n # Uncomment the following for REGULARIZATION\n # l1_l2_reg = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0001, scale_l2=0.0001)\n # reg_penalty = tf.contrib.layers.apply_regularization(l1_l2_reg, [params['L1'], params['L2']])\n\n # Choose loss function\n # loss = tf.reduce_mean(tf.abs(pdw_norm - new_emb_norm))\n\n loss = tf.losses.mean_squared_error(pdw, new_emb)\n\n\n\"\"\"\n Optimization\n\"\"\"\n\nwith tf.name_scope('optimization_via_grads'):\n optimizer = tf.train.GradientDescentOptimizer(lr)\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n\n\n # clip a small value to deal with vanishing and exploding gradients\n # if i don't add these lines, i get NaN values in gradients\n # gradients = [\n # None if gradient is None else tf.where(tf.logical_or(tf.is_nan(gradient), tf.is_inf(gradient)), tf.zeros_like(gradient), gradient)\n # for gradient in gradients]\n # gradients_, _ = tf.clip_by_global_norm(gradients, 1e-10)\n\n # Summarize all gradients and weights\n\n for grad, var in zip(gradients, variables):\n tf.summary.histogram(var.name + '/weights', var)\n tf.summary.histogram(var.name + '/gradient', grad)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n\n\n\"\"\"\n Fixed-point update\n\"\"\"\nwith tf.name_scope(\"fixed_point_update\"):\n fp_emb = (1 - beta) * pdw + beta * e_i\n fp_update = tf.scatter_update(params['dwe'], wi, fp_emb) # we only update a portion of the 
embeddings\n dwe_diff = tf.reduce_max(tf.abs(fp_emb - pdw)) # maximum increment\n\n\n\"\"\"\n TRAINING PROCESS\n\n\"\"\"\n\nwith tf.Session() as sess:\n init_all_op = tf.global_variables_initializer()\n sess.run(init_all_op)\n\n # Merge all summaries into a single op\n merged_summary_op = tf.summary.merge_all()\n # tensorboard line for the Graph\n summary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n # python variable summary (Mean batch loss value)\n m_b_loss_summ = tf.Summary()\n\n print('Training on ' + str(true_nw) + \" data....\")\n\n # create the \"pool\" of indices for batch creation\n indices_pool = np.array([i for i in range(true_nw)])\n\n # basic training parameters\n num_epoch = 10 # total number of epochs to train\n cur_ep = 0\n cur_lr = 5e-2\n num_consec_train = 5 # number of consecutive epochs for SGD\n mode = ['sgd', 'fp']\n cur_mode = 0 # start with 'sgd'. Set this to 1 if you want to start with fp\n tol = 1e-3\n next_schedule = 4 # ( num_consec_train - 1)\n dwe_up_cnt = 0\n tic = 30\n beta_val = 0.7\n # entered_fp = 1\n\n for epoch in range(num_epoch):\n\n # Print epoch\n print(\"################################################################################\")\n print(\"Now processing data for epoch: \" + str(epoch))\n print(\"################################################################################\")\n\n # counter for processed data ( each epoch )\n cnt_processed = 0\n\n # total loss\n epoch_loss = 0\n\n # shuffle before slicing\n np.random.shuffle(indices_pool)\n cost = 0\n totTime = 0\n max_diff = -np.inf\n cur_beta = beta_val ** (dwe_up_cnt + 1)\n \n for i_s in range(0, true_nw, bs):\n\n # i_s and i_e are the Starting and Ending indices of the indices_pool, and their are used to sample our\n # shuffled dataset\n i_e = i_s + bs if i_s + bs < true_nw else true_nw - 1\n\n # word indices to be trained\n wis = indices_pool[i_s: i_e]\n # indices of the negative sample words\n # cnt = 0\n # nwis = []\n # while cnt < len(wis):\n # rand_num = np.random.randint(0, true_nw)\n # if rand_num not in wis:\n # nwis.append(rand_num)\n # cnt += 1\n # nwis = np.array(nwis)\n\n # provide the priors\n priors = np.ones(shape=(len(wis), mw, ms))\n\n # increase the count of processed data\n cnt_processed += len(wis)\n\n # initialize batch_loss\n batch_loss = 0\n\n if mode[cur_mode] == 'sgd':\n\n feed_d = {wi: wis, df: d['def'][wis], dm: d['dmask'][wis], wmask: d['wmask'][wis], h_d: d['idf'][wis],\n lr: cur_lr, pr: priors}\n _, batch_loss, merged_summary = sess.run([train_op, loss, merged_summary_op], feed_dict=feed_d)\n\n # record to Tensorboard\n summary_writer.add_summary(merged_summary, epoch)\n\n elif mode[cur_mode] == 'fp':\n\n feed_d = {wi: wis, df: d['def'][wis], dm: d['dmask'][wis], wmask: d['wmask'][wis], h_d: d['idf'][wis],\n beta: beta_val, pr: priors}\n _, batch_loss, diff, merged_summary = sess.run([fp_update, loss, dwe_diff, merged_summary_op], feed_dict=feed_d)\n\n # record to Tensorboard\n summary_writer.add_summary(merged_summary, epoch)\n\n # Difference on update / updates counted\n max_diff = max(max_diff, float(diff))\n dwe_up_cnt += 1\n\n epoch_loss += batch_loss\n # print mini-batch loss every \"tic\" time\n if (i_e // bs) % tic == 0:\n print(\"Current mode:\" + mode[cur_mode])\n print(\n \"Accumulated loss (\" + str(cnt_processed) + \" of \" + str(true_nw) + \" data): \" + str(epoch_loss))\n\n # End of epoch --> mean batch loss\n mean_batch_loss = epoch_loss / np.ceil((true_nw / bs))\n m_b_loss_summ.value.add(tag='Mean_batch_loss', simple_value= 
mean_batch_loss)\n summary_writer.add_summary(m_b_loss_summ, epoch)\n\n # At the end of each epoch determine the transition of the training process\n if cur_mode == 1 and max_diff < tol:\n cur_mode = 0 # switch to SGD\n max_diff = -np.inf\n next_schedule = epoch + num_consec_train\n elif cur_mode == 0 and next_schedule == epoch:\n cur_mode = 1 # switch to fixed-point iteration\n dwe_up_cnt = 0\n\n # Here you can add a function for saving the model for each epoch\n # TO-DO\n\n # configure the projector\n embeddings_writer = tf.summary.FileWriter(log_dir, sess.graph)\n config = projector.ProjectorConfig()\n config.model_checkpoint_path = os.path.join(models_dir, 'test_model.ckpt')\n embedding_conf = config.embeddings.add()\n embedding_conf.tensor_name = params['dwe'].name\n embedding_conf.metadata_path = os.path.join(log_dir, 'metadata.tsv')\n projector.visualize_embeddings(embeddings_writer, config)\n # Exit training process, save model\n save_model(sess, models_dir, 'test_model.ckpt')\n\n\n","repo_name":"alejandrokonto/nlp","sub_path":"src/dictionary_bootstrapping_Kang_n_Sohn.py","file_name":"dictionary_bootstrapping_Kang_n_Sohn.py","file_ext":"py","file_size_in_byte":13274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"6926220394","text":"import numpy as np\nfrom argparse import ArgumentParser\nfrom scipy.stats import hypergeom\nimport sys\n# from pathlib import Path\n\ndef hypergeom_projection(N, n):\n rN = np.arange(0, N+1)\n rn = np.arange(0, n+1)\n return np.array([hypergeom(N, i, n).pmf(rn) for i in rN])\n\ndef python2round(f):\n if round(f + 1) - round(f) != 1:\n return f + abs(f) / f * 0.5\n return round(f)\n\ndef index_bis(i, n):\n return int(min(max(python2round(i * n / float(n+1)), 2), n-2))\n\ndef calcJK13(n):\n J = np.zeros((n, n-1))\n for i in range(n):\n ibis = index_bis(i + 1, n) - 1\n J[i, ibis] = -(1.+n) * ((2.+i)*(2.+n)*(-6.-n+(i+1.)*(3.+n))-2.*(4.+n)*(-1.+(i+1.)*(2.+n))*(ibis+1.)\n +(12.+7.*n+n**2)*(ibis+1.)**2) / (2.+n) / (3.+n) / (4.+n)\n J[i, ibis - 1] = (1.+n) * (4.+(1.+i)**2*(6.+5.*n+n**2)-(i+1.)*(14.+9.*n+n**2)-(4.+n)*(-5.-n+2.*(i+1.)*(2.+n))*(ibis+1.)\n +(12.+7.*n+n**2)*(ibis+1.)**2) / (2.+n) / (3.+n) / (4.+n) / 2.\n J[i, ibis + 1] = (1.+n) * ((2.+i)*(2.+n)*(-2.+(i+1.)*(3.+n))-(4.+n)*(1.+n+2.*(i+1.)*(2.+n))*(ibis+1.)\n +(12.+7.*n+n**2)*(ibis+1.)**2) / (2.+n) / (3.+n) / (4.+n) / 2.\n return J\n\n\nif __name__ == '__main__':\n parser = ArgumentParser('convert matrices from transition_probability_explicit')\n parser.add_argument('--sample-size', '-n', type=int)\n parser.add_argument('--jackknife', '-j', type=int)\n parser.add_argument('output_file')\n\n args = parser.parse_args()\n n = args.sample_size\n k = args.jackknife\n\n # txt = Path(args.input_file).read_text()\n txt = sys.stdin.read()\n T = list()\n\n chunks = txt.split('---')\n for i, chunk in enumerate(chunks):\n if not chunk.isspace():\n t = np.fromstring(chunk, sep=' ')\n T.append(t.reshape((i+1, n+1)))\n\n M = np.zeros((n+1, n+1))\n assert np.all(T[0] == 0)\n\n # first prt of the sum - hypergeometric down\n for i in range(0, n+1):\n h = hypergeom_projection(n, i)\n M += h @ T[i]\n\n # second part - jackknife up\n J = np.eye(n+1)\n for i in range(n+1, n+k+1):\n J = calcJK13(i+1) @ J\n M += J.T @ T[i]\n\n np.savetxt(args.output_file, 
M)\n","repo_name":"ivan-krukov/taming-strong-selection","sub_path":"src/read_matrices.py","file_name":"read_matrices.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"2607944739","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport time\nimport json\nimport struct\nimport codecs\nfrom pymodbus.client.sync import ModbusTcpClient\nnamed_tuple = time.localtime() # getstruct_time\ntime_string = time.strftime(\"%m/%d/%Y, %H:%M:%S acthor watty.py\", named_tuple)\ndevicenumber = str(sys.argv[1])\nipadr = str(sys.argv[2])\nuberschuss = int(sys.argv[3])\natype = str(sys.argv[4])\ninstpower = int(sys.argv[5])\nforcesend = int(sys.argv[6])\n# forcesend = 0 default acthor time period applies\n# forcesend = 1 default overwritten send now\n# forcesend = 9 default overwritten no send\nfp = '/var/www/html/openWB/ramdisk/smarthome_device_'\nfile_string = fp + str(devicenumber) + '_acthor.log'\nfile_stringpv = fp + str(devicenumber) + '_pv'\nfile_stringcount = fp + str(devicenumber) + '_count'\nfile_stringcount5 = fp + str(devicenumber) + '_count5'\ncount5 = 999\nif os.path.isfile(file_stringcount5):\n f = open(file_stringcount5, 'r')\n count5 = int(f.read())\n f.close()\nif (forcesend == 0):\n count5 = count5 + 1\nelif (forcesend == 1):\n count5 = 999\nelse:\n count5 = 1\nif count5 > 3:\n count5 = 0\nf = open(file_stringcount5, 'w')\nf.write(str(count5))\nf.close()\nfaktor = 1.0\nmodbuswrite = 0\nneupower = 0\nif instpower == 0:\n instpower = 1000\ncap = 9000\nif atype == \"9s18\":\n faktor = 18000/instpower\n cap = 18000\nelif atype == \"9s\":\n faktor = 9000/instpower\nelif atype == \"M3\":\n faktor = 6000/instpower\nelse:\n faktor = 3000/instpower\npvmodus = 0\nif os.path.isfile(file_stringpv):\n f = open(file_stringpv, 'r')\n pvmodus = int(f.read())\n f.close()\npowerc = 0\n# aktuelle Leistung lesen\nclient = ModbusTcpClient(ipadr, port=502)\n#\n#\nstart = 1000\nresp=client.read_holding_registers(start,10,unit=1)\n# start = 3524\n# resp = client.read_input_registers(start, 10, unit=1)\nvalue1 = resp.registers[0]\nall = format(value1, '04x')\naktpower = int(struct.unpack('>h', codecs.decode(all, 'hex'))[0])\nif count5 == 0:\n count1 = 999\n if os.path.isfile(file_stringcount):\n f = open(file_stringcount, 'r')\n count1 = int(f.read())\n f.close()\n count1 = count1+1\n value1 = resp.registers[3]\n all = format(value1, '04x')\n status = int(struct.unpack('>h', codecs.decode(all, 'hex'))[0])\n # logik\n if uberschuss < 0:\n neupowertarget = int((uberschuss + aktpower) * faktor)\n else:\n neupowertarget = int((uberschuss + aktpower) * faktor)\n if neupowertarget < 0:\n neupowertarget = 0\n if neupowertarget > int(cap * faktor):\n neupowertarget = int(cap * faktor)\n # status nach handbuch Thor\n # 0.. Aus\n # 1-8 Geraetestart\n # 9 Betrieb\n # >=200 Fehlerzustand Leistungsteil\n neupower = neupowertarget\n # wurde Thor gerade ausgeschaltet ? 
(pvmodus == 99 ?)\n # dann 0 schicken wenn kein pvmodus mehr\n # und pv modus ausschalten\n if pvmodus == 99:\n modbuswrite = 1\n neupower = 0\n f = open(file_stringpv, 'w')\n pvmodus = 0\n f.write(str(pvmodus))\n f.close()\n # sonst wenn pv modus lauft , ueberschuss schicken\n else:\n if pvmodus == 1:\n modbuswrite = 1\n # logschreiben\n if count1 > 80:\n count1 = 0\n if count1 < 3:\n if os.path.isfile(file_string):\n f = open(file_string, 'a')\n else:\n f = open(file_string, 'w')\n helpstr = '%s devicenr %s ipadr %s ueberschuss %6d Akt Leistung'\n helpstr += ' %6d Status %2d type %s inst. Leistung %6d Skalierung %.2f'\n print(helpstr % (time_string, devicenumber, ipadr, uberschuss,\n aktpower, status, atype, instpower, faktor), file=f)\n helpstr = '%s devicenr %s ipadr %s Neu Leistung %6d '\n helpstr += 'pvmodus %1d modbuswrite %1d'\n print(helpstr % (time_string, devicenumber, ipadr, neupower,\n pvmodus, modbuswrite), file=f)\n f.close()\n # modbus write\n if modbuswrite == 1:\n rq = client.write_register(1000, neupower, unit=1)\n if count1 < 3:\n f = open(file_string, 'a')\n print('%s devicenr %s ipadr %s device written by modbus ' %\n (time_string, devicenumber, ipadr), file=f)\n f.close()\n f = open(file_stringcount, 'w')\n f.write(str(count1))\n f.close()\nelse:\n if pvmodus == 99:\n pvmodus = 0\nanswer = '{\"power\":' + str(aktpower) + ',\"powerc\":' + str(powerc)\nanswer += ',\"send\":' + str(modbuswrite) + ',\"sendpower\":' + str(neupower)\nanswer += ',\"on\":' + str(pvmodus) + '}'\nf1 = open('/var/www/html/openWB/ramdisk/smarthome_device_ret' +\n str(devicenumber), 'w')\njson.dump(answer, f1)\nf1.close()\n","repo_name":"skl77/openWB","sub_path":"modules/smarthome/acthor/watt.py","file_name":"watt.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"23"} +{"seq_id":"35528385152","text":"import os\nfrom time import perf_counter\nfrom tqdm import tqdm\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom transformers import PerceiverConfig, PerceiverModel\nfrom transformers.models.perceiver.modeling_perceiver import (\n PerceiverClassificationDecoder,\n)\nimport torch\nfrom torch import optim\nfrom scipy import stats\nfrom sklearn.metrics import confusion_matrix\n\nimport seaborn as sns\nfrom matplotlib import pyplot\n\n\n\n# Functions\ndef calc_pearsons(preds,labels):\n r = stats.pearsonr(preds, labels)\n return r[0]\n\ndef mean_pearsons(preds,labels):\n preds = np.row_stack([np.array(p) for p in preds])\n labels = np.row_stack([np.array(l) for l in labels])\n num_classes = preds.shape[1]\n class_wise_r = np.array([calc_pearsons(preds[:,i], labels[:,i]) for i in range(num_classes)])\n mean_r = np.mean(class_wise_r)\n return mean_r\n\n# Main\nparser = argparse.ArgumentParser(description=\"Train Perceiver\")\n\nvisual_parser = parser.add_mutually_exclusive_group(required=False)\nvisual_parser.add_argument('--visual', dest='visual', action='store_true')\nvisual_parser.add_argument('--no-visual', dest='visual', action='store_false')\nparser.set_defaults(visual=True)\n\naudio_parser = parser.add_mutually_exclusive_group(required=False)\naudio_parser.add_argument('--audio', dest='audio', action='store_true')\naudio_parser.add_argument('--no-audio', dest='audio', action='store_false')\nparser.set_defaults(audio=True)\n\ntextual_parser = parser.add_mutually_exclusive_group(required=False)\ntextual_parser.add_argument('--textual', dest='textual', 
action='store_true')\ntextual_parser.add_argument('--no-textual', dest='textual', action='store_false')\nparser.set_defaults(textual=True)\n\nFAU_parser = parser.add_mutually_exclusive_group(required=False)\nFAU_parser.add_argument('--FAU', dest='FAU', action='store_true')\nFAU_parser.add_argument('--no-FAU', dest='FAU', action='store_false')\nparser.set_defaults(FAU=True)\n\nparser.add_argument(\n \"--visual_features_input_folder\",\n help=\"Input folder containing the extracted visual features\",\n default=None,\n required=False)\nparser.add_argument(\n \"--audio_features_input_folder\",\n help=\"Input folder containing the extracted audio features\",\n default=None,\n required=False)\nparser.add_argument(\n \"--textual_features_input_folder\",\n help=\"Input folder containing the extracted textual features\",\n default=None,\n required=False)\nparser.add_argument(\n \"--FAU_features_input_folder\",\n help=\"Input folder containing the extracted FAU features\",\n default=None,\n required=False)\nparser.add_argument(\n \"--csv_path\",\n help=\"Input CSV containing labels and splits\",\n default=None,\n required=True)\nparser.add_argument(\n \"--output_path\",\n help=\"Output folder containing the generated confusion matrices\",\n default=None,\n required=False)\nparser.add_argument(\n \"--model_path\",\n help=\"Output folder to store model checkpoints\",\n default=None,\n required=True)\nparser.add_argument(\n \"--batch_size\",\n help=\"Training batch size\",\n type=int,\n default=16,\n required=False)\n\nargs = parser.parse_args()\n\n#if not os.path.exists(args.output_path):\n# os.mkdir(args.output_path)\n\nemotions = ['Adoration', 'Amusement', 'Anxiety', 'Disgust', 'Empathic-Pain', 'Fear', 'Surprise']\nx_axis_labels = [\"[0.0, 0.1)\", \"[0.1, 0.2)\", \"[0.2, 0.3)\", \"[0.3, 0.4)\", \"[0.4, 0.5)\", \"[0.5, 0.6)\", \"[0.6, 0.7)\", \"[0.7, 0.8)\", \"[0.8, 0.9)\", \"[0.9, 1.0]\"] # labels for x-axis\ny_axis_labels = [\"[0.0, 0.1)\", \"[0.1, 0.2)\", \"[0.2, 0.3)\", \"[0.3, 0.4)\", \"[0.4, 0.5)\", \"[0.5, 0.6)\", \"[0.6, 0.7)\", \"[0.7, 0.8)\", \"[0.8, 0.9)\", \"[0.9, 1.0]\"] # labels for y-axis\n\n# set device\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n#device = torch.device('cpu')\n\n# get the file list\ndf = pd.read_csv(args.csv_path, sep=\",\")\ngroups = df.groupby(\"Split\")\nsplits = {}\nfor name, group in groups:\n splits[name] = group\nval_file_list = [file[1:-1]+\".pt\" for file in list(splits[\"Val\"].File_ID)]\nval_labels = torch.tensor(splits[\"Val\"][['Adoration', 'Amusement', 'Anxiety', 'Disgust', 'Empathic-Pain', 'Fear', 'Surprise']].values)\n\n#loading data\nval_data = None\nif args.visual:\n val_data = torch.stack([torch.load(args.visual_features_input_folder + \"/\" + file) for file in val_file_list])\nif args.audio:\n if val_data == None:\n val_data = torch.stack([torch.load(args.audio_features_input_folder + \"/\" + file) for file in val_file_list])\n else:\n val_data = torch.cat((val_data, torch.stack([torch.load(args.audio_features_input_folder + \"/\" + file) for file in val_file_list])), 2)\n\nif args.textual:\n if val_data == None:\n val_data = torch.stack([torch.load(args.textual_features_input_folder + \"/\" + file) for file in val_file_list])\n else:\n val_data = torch.cat((val_data, torch.stack([torch.load(args.textual_features_input_folder + \"/\" + file) for file in val_file_list])), 2)\n\nif args.FAU:\n if val_data == None:\n val_data = torch.stack([torch.cat((torch.load(args.FAU_features_input_folder + 
\"/\" + file).to(dtype=torch.float32), torch.zeros(32,5).to(dtype=torch.float32)), 1) for file in val_file_list])\n else:\n val_data = torch.cat((val_data, torch.stack([torch.cat((torch.load(args.FAU_features_input_folder + \"/\" + file).to(dtype=torch.float32), torch.zeros(32,5).to(dtype=torch.float32)), 1) for file in val_file_list])), 2)\n\n\n# loading model\nmodel = torch.load(args.model_path)\nmodel.to(device)\nmodel.eval()\n\n# compute the number of batches\nif len(val_file_list) % args.batch_size == 0:\n n_val_batches = int(len(val_file_list) / args.batch_size)\nelse:\n n_val_batches = int(len(val_file_list) / args.batch_size) + 1\n\npreds_discretized = []\nlabels_discretized = []\n\n\n# make predictions\nwith torch.no_grad():\n current_loss = 0.0\n preds = None\n for j in tqdm(range(n_val_batches)):\n batch = val_data[j*args.batch_size:min(len(val_file_list), (j+1)*args.batch_size), :, :].to(device)\n #labels = val_labels[j*args.batch_size:min(len(val_file_list), (j+1)*args.batch_size), :].to(device).float()\n outputs = model(inputs=batch)\n logits = outputs.logits\n\n if preds == None:\n preds = logits.detach().cpu()\n else:\n preds = torch.cat((preds, logits.detach().cpu()), 0)\n \n for i in range(len(emotions)):\n preds_discretized.append([int(e[i]/0.1) if e[i] < 1 else 9 for e in preds])\n labels_discretized.append([int(e[i]/0.1) if e[i] < 1 else 9 for e in val_labels])\n cm = confusion_matrix(labels_discretized[i], preds_discretized[i]).astype(np.float32)\n\n for j in range(len(cm)):\n cm[j] = cm[j] / sum(cm[j])\n\n print(\"Pearson Correlation of\", emotions[i], \"emotion:\", calc_pearsons([e[i] for e in preds], [e[i] for e in val_labels]))\n print(\"Confusion Matrix of\", emotions[i], \"emotion:\\n\", cm, \"\\n\\n\")\n\n pyplot.figure(figsize=(15, 10))\n sns.set(font_scale=1.8)\n ax = sns.heatmap(cm, cmap=\"Blues\", annot=True, fmt=\".2f\")\n ax.set_xlabel(\"Predicted\", fontsize = 22)\n ax.set_ylabel(\"Gold Standard\", fontsize = 22)\n ax.set_xticklabels(x_axis_labels, rotation=45)\n ax.set_yticklabels(y_axis_labels, rotation=0)\n fig = ax.get_figure()\n fig.savefig(\"data/heatmaps/heatmap_\" + emotions[i] + \".png\", bbox_inches='tight')\n fig.clf()\n\n preds_classification = [np.argmax(e) for e in preds]\n labels_classification = [np.argmax(e) for e in val_labels]\n cm = confusion_matrix(labels_classification, preds_classification)\n print(\"Confusion Matrix of emotion classification:\\n\", cm, \"\\n\\n\")\n\n '''\n ax = sns.heatmap(cm, cmap=\"Blues\", annot=True, yticklabels=emotions)\n ax.set_xticklabels(emotions, rotation=45)\n fig = ax.get_figure()\n fig.savefig(\"data/heatmaps/heatmap_emotion_classification.png\", bbox_inches='tight')\n fig.clf()\n \n pearson = mean_pearsons(preds, val_labels.detach().cpu())\n print(\"PEARSON Correlation:\", pearson)\n '''\n\n\n\n","repo_name":"VaianiLorenzo/ViPER","sub_path":"confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"23"} +{"seq_id":"5358085073","text":"from pytest import raises\n\nfrom peakina.helpers import (\n UnknownType,\n bytes_head,\n detect_encoding,\n detect_sep,\n detect_type,\n guess_type,\n str_head,\n validate_encoding,\n validate_kwargs,\n)\n\n\ndef test_bytes_head(path):\n assert bytes_head(path('0_0.csv'), 1) == b'a,b\\n'\n assert bytes_head(path('0_0.csv'), 100) == b'a,b\\n0,0\\n0,1'\n\n\ndef test_str_head(path):\n assert str_head(path('0_0.csv'), 1) == 'a,b\\n'\n assert 
str_head(path('0_0.csv'), 100) == 'a,b\\n0,0\\n0,1'\n with raises(UnicodeDecodeError):\n str_head(path('latin_1.csv'), 1)\n assert str_head(path('latin_1.csv'), 1, encoding='latin1')[:4] == 'Date'\n\n\ndef test_validate_encoding(path):\n assert validate_encoding(path('0_0.csv'), None)\n assert validate_encoding(path('0_0.csv'), 'utf8')\n assert not validate_encoding(path('latin_1.csv'), 'utf8')\n assert validate_encoding(path('latin_1.csv'), 'latin1')\n\n\ndef test_detect_encoding(path):\n assert detect_encoding(path('latin_1.csv')) == 'ISO-8859-1'\n\n\ndef test_detect_sep(path):\n assert detect_sep(path('0_0.csv')) == ','\n assert detect_sep(path('0_0_sep.csv')) == ';'\n\n\ndef test_detect_type(path):\n assert detect_type(path('0_0.csv')) == 'csv'\n with raises(UnknownType) as e:\n detect_type(path('fixture.xml'))\n assert str(e.value) == \"Unknown detected type. Supported types are: 'csv', 'excel', 'json'.\"\n\n\ndef test_guess_type():\n assert guess_type('a.tsv', is_regex=False) == 'csv'\n with raises(UnknownType) as e:\n guess_type('a.tsv$', is_regex=False)\n assert str(e.value) == \"Unknown guessed type None. Supported types are: 'csv', 'excel', 'json'.\"\n with raises(UnknownType):\n guess_type('a.jpg', is_regex=False)\n with raises(UnknownType):\n guess_type('a.jpg$', is_regex=True)\n assert guess_type('a.tsv', is_regex=True) == 'csv'\n assert guess_type('a.tsv$', is_regex=True) == 'csv'\n assert guess_type('a.*', is_regex=True) is None\n\n\ndef test_validate_kwargs():\n import pandas as pd\n\n assert validate_kwargs({'encoding': 'utf8'}, [pd.read_csv])\n with raises(ValueError) as e:\n validate_kwargs({'sheet_name': 0}, [pd.read_csv])\n assert str(e.value) == \"Unsupported kwargs: 'sheet_name'\"\n assert validate_kwargs({'sheet_name': 0}, [pd.read_csv, pd.read_excel])\n","repo_name":"adimascio/peakina","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"38351672048","text":"import math\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.keras import backend, callbacks\nfrom tensorflow.python.keras.initializers import Constant\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import layers\nfrom .weighted_loss import WeightedLoss\nfrom ..base_model import BaseModel\nfrom utils import tool\nfrom tensorflow.python import train\nfrom utils.auc import AUC # This can be replaced by tf.keras.AUC when tf version >=1.12\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.ops import nn\n\n\nclass UncertaintyWeight(object):\n '''\n Train any model in uncertainty_weight manner\n '''\n\n def __init__(self, base_model: BaseModel):\n '''\n :param base_model: any model inherited from the base_model\n '''\n self.base_model = base_model\n self.add_weighted_loss()\n\n def __getattr__(self, item):\n '''\n Delegate the base model\n :param item:\n :return:\n '''\n return getattr(self.base_model, item)\n\n def add_weighted_loss(self):\n y_true = layers.Input(shape=(1,), dtype=tf.float32, name='label')\n user_id, item_id, domain_idx = self.model.inputs\n y_pred = self.model.outputs[0]\n y_pred = WeightedLoss(n_domains=self.n_domain)([y_true, y_pred, domain_idx])\n model = Model(inputs=[user_id, item_id, domain_idx, y_true], outputs=y_pred)\n model.summary()\n # Optimization\n if self.train_config['optimizer'] == 'adam':\n opt = 
train.AdamOptimizer(learning_rate=self.train_config['learning_rate'])\n else:\n opt = self.train_config['optimizer']\n\n model.compile(loss=None,\n optimizer=opt,\n metrics=[AUC(num_thresholds=500, name=\"AUC\")])\n model.metrics_names = [] # Must set to empty to remove bug\n self.model = model\n\n def train(self):\n backend.get_session().run(tf.global_variables_initializer())\n\n tensorboard_callback = callbacks.TensorBoard(log_dir=os.path.dirname(self.checkpoint_path),\n histogram_freq=self.train_config['histogram_freq'],\n write_grads=True)\n data_iter = self.build_data_iter()\n train_sequence = list(range(self.n_domain))\n lock = False\n for epoch in range(self.train_config['epoch']):\n print(\"Epoch: {}\".format(epoch), \"-\" * 30)\n # Train\n random.shuffle(train_sequence)\n for idx in train_sequence:\n d = self.dataset.train_dataset[idx]\n print(\"Train on: Domain {}\".format(idx))\n old_time = time.time()\n self.model.fit(d['data'], steps_per_epoch=d['n_step'], verbose=0, callbacks=[],\n epochs=epoch + 1, initial_epoch=epoch)\n print(\"Training time: \", time.time() - old_time)\n # Val\n print(\"Val Result: \")\n avg_loss, avg_auc, domain_loss, domain_auc = self.val_and_test(\"val\")\n # Early Stopping\n if self.early_stop_step(avg_auc):\n break\n # Test\n print(\"Test Result: \")\n test_avg_loss, test_avg_auc, test_domain_loss, test_domain_auc = self.val_and_test(\"test\")\n\n # Lock the graph for better performance\n if not lock:\n graph = tf.get_default_graph()\n graph.finalize()\n lock = True\n\n def build_data_iter(self):\n data_iter = {}\n for idx, d in self.dataset.train_dataset.items():\n train_iter = d['data'].make_initializable_iterator()\n data_iter[idx] = {\"train_iter\": train_iter, \"train_step\": d['n_step']}\n return data_iter\n","repo_name":"RManLuo/MAMDR","sub_path":"model_zoo/uncertainty_weight/uncertainty_weight.py","file_name":"uncertainty_weight.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"23"} +{"seq_id":"5536857002","text":"from django.urls import path\nfrom .views import (\n KoncertListView, \n KoncertDetailView, \n KoncertCreateView,\n KoncertUpdateView,\n KoncertDeleteView,\n CzwartekListView,\n PiatekListView,\n SobotaListView,\n TsportuListView,\n TsportuDetailView,\n TkulturyListView,\n TkulturyDetailView,\n PlanListView,\n )\nfrom . 
import views\n\nurlpatterns = [\n path('', PlanListView.as_view(), name='harmonogram-home'),\n path('tydzien-sportu/', TsportuListView.as_view(), name='harmonogram-tydzien-sportu'),\n path('tydzien-sportu/szczegoly//', TsportuDetailView.as_view(), name='tydzien-sportu-detail'),\n path('tydzien-sportu/szczegoly///', views.tsportu_uczestnictwo_me, name='tsportu_uczestnictwo_me'),\n path('tydzien-sportu/szczegoly///', views.tsportu_uczestnictwo_other, name='tsportu_uczestnictwo_other'),\n path('tydzien-sportu//update/', KoncertUpdateView.as_view(), name='tydzien-sportu-update'),\n path('tydzien-sportu//delete/', KoncertDeleteView.as_view(), name='tydzien-sportu-delete'),\n path('tydzien-kultury/', TkulturyListView.as_view(), name='harmonogram-tydzien-kultury'),\n path('tydzien-kultury/szczegoly//', TkulturyDetailView.as_view(), name='tydzien-kultury-detail'),\n path('tydzien-kultury/szczegoly///', views.tkultury_uczestnictwo_me, name='tkultury_uczestnictwo_me'),\n path('tydzien-kultury/szczegoly///', views.tkultury_uczestnictwo_other, name='tkultury_uczestnictwo_other'),\n path('tydzien-kultury//update/', KoncertUpdateView.as_view(), name='tydzien-kultury-update'),\n path('tydzien-kultury//delete/', KoncertDeleteView.as_view(), name='tydzien-kultury-delete'),\n path('koncerty/', KoncertListView.as_view(), name='harmonogram-koncerty'),\n path('koncerty/szczegoly//', KoncertDetailView.as_view(), name='koncerty-detail'),\n path('wydarzenie/nowe/', KoncertCreateView.as_view(), name='koncerty-create'),\n path('koncerty//update/', KoncertUpdateView.as_view(), name='koncerty-update'),\n path('koncerty//delete/', KoncertDeleteView.as_view(), name='koncerty-delete'),\n path('koncerty/czwartek/', CzwartekListView.as_view(), name='koncerty-czwartek'),\n path('koncerty/piatek/', PiatekListView.as_view(), name='koncerty-piatek'),\n path('koncerty/sobota/', SobotaListView.as_view(), name='koncerty-sobota'),\n path('koncerty/szczegoly///', views.koncerty_uczestnictwo_me, name='koncerty_uczestnictwo_me'),\n path('koncerty/szczegoly///', views.koncerty_uczestnictwo_other, name='koncerty_uczestnictwo_other'),\n]","repo_name":"ArkadiuszJablonski/Aplikacja-juwenalia","sub_path":"harmonogram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"18722387051","text":"\"\"\"Tests for the experiment stack module.\"\"\"\n\nimport unittest\nimport yaml\n\nfrom vot.workspace import NullStorage\nfrom vot.stack import Stack, list_integrated_stacks, resolve_stack\n\nclass NoWorkspace:\n \"\"\"Empty workspace, does not save anything\n \"\"\"\n\n @property\n def storage(self):\n \"\"\"Returns the storage object for the workspace. 
\"\"\"\n return NullStorage()\n\nclass TestStacks(unittest.TestCase):\n \"\"\"Tests for the experiment stack utilities\n \"\"\"\n\n def test_stacks(self):\n \"\"\"Test loading integrated stacks\n \"\"\"\n \n stacks = list_integrated_stacks()\n for stack_name in stacks:\n try:\n with open(resolve_stack(stack_name), 'r') as fp:\n stack_metadata = yaml.load(fp, Loader=yaml.BaseLoader)\n Stack(stack_name, NoWorkspace(), **stack_metadata)\n except Exception as e:\n self.fail(\"Stack {}: {}\".format(stack_name, e))","repo_name":"votchallenge/toolkit","sub_path":"vot/stack/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"23"} +{"seq_id":"34698539066","text":"from collections import deque\n\nleft = set(('{', '[', '('))\nright = set(('}', ']', ')'))\n\n\ndef check(queue):\n stack = []\n for i in range(len(queue)):\n if len(stack) == 0:\n if queue[i] in right:\n return False\n else:\n stack.append(queue[i])\n else:\n if queue[i] in left:\n stack.append(queue[i])\n elif queue[i] in right:\n if queue[i] == '}' and stack[-1] == '{':\n stack.pop()\n elif queue[i] == ')' and stack[-1] == '(':\n stack.pop()\n elif queue[i] == ']' and stack[-1] == '[':\n stack.pop()\n else:\n return False\n if len(stack) == 0:\n return True\n else:\n return False\n\n\ndef solution(s):\n answer = 0\n queue = deque(list(s))\n for i in range(len(queue)):\n if check(queue):\n answer += 1\n queue.append(queue.popleft())\n\n return answer","repo_name":"inkyu-yoon/Study_Algorithm","sub_path":"파이썬/프로그래머스/Lv2/괄호 회전하기/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"743317269","text":"import numpy as np\nimport datetime\nimport json\nfrom datetime import datetime\nfrom .. 
import S2SDownloader\nfrom .model_setup import *\n\n\nclass S2SDownloaderECMWF(S2SDownloader):\n DEFAULT_REQUEST_ECMWF = {\"origin\": \"ecmf\"}\n STEP_ALL = utils.list_to_string(np.arange(0, 1104 + 1, 24))\n MODEL_NAME = \"ecmwf\"\n\n request = {}\n\n def __init__(self):\n S2SDownloader.__init__(self)\n self.request = super().DEFAULT_REQUEST\n self.request.update(self.DEFAULT_REQUEST_ECMWF)\n\n def retreive(\n self,\n param,\n reftime,\n plevs,\n file_descr,\n path=\"./\",\n area=None,\n step=\"all\",\n exact_reftime=False,\n write_info_file=True,\n rt_cf_kwargs={},\n rt_pf_kwargs={},\n hc_cf_kwargs={},\n hc_pf_kwargs={},\n **kwargs\n ):\n \"\"\"\n Download S2S ECMWF data.\n Args:\n param (): Parameters to download.\n reftime (): Realtime Dates.\n plevs (): Pressure Levels.\n file_descr (): Description of fields that will show up in the file name.\n path (): Directory to store the output.\n area (str): Area in the format \"N/W/S/E\"\n step (): Forecast lead times; if \"all\" then download all available in daily resolution.\n exact_reftime (bool): If true, assert that the given reftime dates are actual model initialization dates, if False, then automatically drop the invalid dates.\n write_info_file (bool): If true, write a .json file into the target directory with info about request.\n rt_cf_kwargs (dict): Additional request keywords passed to realtime control forecasts.\n rt_pf_kwargs (dict): Additional request keywords passed to realtime perturbed forecasts.\n hc_cf_kwargs (dict): Additional request keywords passed to hindcast control forecasts.\n hc_pf_kwargs (dict): Additional request keywords passed to hindcast perturbed forecasts.\n **kwargs (): Additional request keywords.\n \"\"\"\n self.request[\"param\"] = utils.list_to_string(list(param))\n if plevs is not None:\n self.request[\"levelist\"] = utils.list_to_string(list(plevs))\n if area is not None:\n self.request[\"area\"] = area\n\n # all forecast steps\n if step == \"all\":\n step = self.STEP_ALL\n else:\n step = utils.list_to_string(step)\n self.request[\"step\"] = step\n\n # drop dates without model initialization\n filtered_reftime = self.filter_reftimes(reftime)\n if exact_reftime:\n assert filtered_reftime == reftime, (\n \"reftime contains dates that are no reftimes. only allowed if \"\n \"assert_reftime=False. 
\"\n )\n\n self.request.update(dict(**kwargs))\n\n for d in filtered_reftime:\n\n self.request[\"date\"] = str(d)\n\n for fc_type, fc_type_kwargs, fc_type_class in [\n (\"rt_cf\", rt_cf_kwargs, RtCf),\n (\"rt_pf\", rt_pf_kwargs, RtPf),\n (\"hc_cf\", hc_cf_kwargs, HcCf),\n (\"hc_pf\", hc_pf_kwargs, HcPf),\n ]:\n if \"skip\" in fc_type_kwargs:\n if fc_type_kwargs.get(\"skip\", False):\n continue\n else:\n fc_type_kwargs.pop(\"skip\")\n fc_type_kwargs = dict(fc_type_class(d).request, **fc_type_kwargs)\n self.request[\"target\"] = (\n path\n + \"/\"\n + self.file_name(file_descr=file_descr, fc_type=fc_type, reftime=d)\n )\n full_request = dict(self.request, **fc_type_kwargs)\n if \"skip\" in full_request:\n full_request.pop(\"skip\")\n print(full_request)\n print(full_request)\n super().retrieve(full_request)\n\n if write_info_file:\n self.make_request_info_file(\n path, file_descr, fc_type, full_request, filtered_reftime\n )\n write_info_file = False\n\n def file_name(self, file_descr, fc_type, reftime):\n \"\"\"\n File name convention.\n Args:\n file_descr (str): Description of fields that is included in the file name.\n fc_type (str): Usually one of \"rt_cf\", \"rt_pf\", \"hc_cf\", \"hc_pf\".\n reftime (np.datetime64): Model realtime initilization date (reference time).\n\n Returns : File Name (str)\n\n \"\"\"\n target = \"s2s_{model}_{file_descr}_{reftime}_{fc_type}.nc\".format(\n model=self.MODEL_NAME,\n file_descr=file_descr,\n fc_type=fc_type,\n reftime=reftime,\n )\n return target\n\n def make_request_info_file(\n self, path, file_descr, fc_type, full_request, all_dates\n ):\n \"\"\"\n Create a .json file including information about s2stools request.\n Args:\n path (str): Directory to place file.\n file_descr (str): Description of fields that is included in file name.\n fc_type (str): Usually one of \"rt_cf\", \"rt_pf\", \"hc_cf\", \"hc_pf\".\n full_request (dict): Full API request.\n all_dates (list of np.datetime64[D]): List of all requested reftime dates.\n \"\"\"\n now = datetime.now().isoformat(timespec=\"minutes\")\n filename = (\n \"request_s2s_{model_name}_{file_descr}_{datetime}_{fc_type}.json\".format(\n model_name=self.MODEL_NAME,\n file_descr=file_descr,\n datetime=now.replace(\":\", \"\"),\n fc_type=fc_type,\n )\n )\n content = {\n \"info\": {\n \"time\": now,\n \"fc_type\": fc_type,\n \"reftime_dates\": list(all_dates.astype(\"str\")),\n },\n \"request\": full_request,\n }\n\n if True: # for development purpose\n with open(path + \"/\" + filename, \"w\") as outfile:\n json.dump(content, outfile)\n\n @staticmethod\n def filter_reftimes(dates):\n \"\"\"\n Drop dates that are no valid reftimes (realtime model initialization dates) for S2S ECMWF.\n Args:\n dates (list of np.datetime64[D]): Dates to analyze.\n Returns:\n list: List of valid dates.\n \"\"\"\n dates = np.atleast_1d(dates)\n dates_pd = pd.Series(dates)\n filtered_dates = dates[(dates_pd.dt.weekday == 0) | (dates_pd.dt.weekday == 3)]\n assert len(filtered_dates) > 0, \"no dates left after filtering reftimes\"\n return filtered_dates\n","repo_name":"jonas-spaeth/s2stools","sub_path":"s2stools/download/ecmwf/S2SDownloaderECMWF.py","file_name":"S2SDownloaderECMWF.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"23"} +{"seq_id":"46594773456","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 13 10:39:14 2020\n\n@author: aidam\n\"\"\"\nimport numpy as np\nimport cv2\n\n\ncap = cv2.VideoCapture(1)\n# Check if the webcam is 
opened correctly\nif not cap.isOpened():\n cap = cv2.VideoCapture(0)\nif not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\nwhile True:\n ret,frame = cap.read()\n\n cv2.imshow('Original video',frame)\n\n if cv2.waitKey(2) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"BartyPitt/RoboticsProject","sub_path":"OpenCvCode/Test_if_webcam_works.py","file_name":"Test_if_webcam_works.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"23"} +{"seq_id":"20490903256","text":"def mergeSort(num):\n\tres = []\n\tn = len(num)\n\tif n <= 1:\n\t\treturn num\n\tpre = mergeSort(num[ : n//2])\n\tpost = mergeSort(num[n//2 : ])\n\tl, k, m,n = 0, 0, len(pre), len(post)\n\twhile l < m and k < n:\n\t\tif pre[l] <= post[k]:\n\t\t\tres.append(pre[l])\n\t\t\tl += 1\n\t\telse:\n\t\t\tres.append(post[k])\n\t\t\tk += 1\n\tif l < m:\n\t\tres += pre[l:]\n\tif k < n:\n\t\tres += post[k:]\n\treturn res\n\na = mergeSort([10,3,5])\n\n\n\n","repo_name":"effyhuihui/leetcode","sub_path":"basic/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"27704385735","text":"from __future__ import absolute_import, print_function, unicode_literals\nfrom collections import OrderedDict\nfrom json import dumps as to_json\nfrom logging import getLogger\nlog = getLogger('gs.group.groups.json.hook')\nfrom zope.cachedescriptors.property import Lazy\n#from zope.component import createObject, queryMultiAdapter\nfrom zope.formlib import form\nfrom gs.content.form.api.json import SiteEndpoint\nfrom gs.auth.token import log_auth_error\nfrom .interfaces import IGetGroups\nfrom gs.groups.allgroups import AllGroupsOnSite\nfrom gs.group.privacy.interfaces import IGSGroupVisibility\nfrom gs.group.type.set.interfaces import IUnsetType\nfrom Products.GSGroup.interfaces import IGSMailingListInfo\n\n\nclass NoList(AttributeError):\n 'There was no such list'\n\n\nclass GroupsHook(SiteEndpoint):\n '''The page that gets a list of the groups on the site'''\n label = 'Get the groups on the site'\n form_fields = form.Fields(IGetGroups, render_context=False)\n\n @form.action(label='Get', name='get', prefix='',\n failure='handle_get_failure')\n def handle_get_groups(self, action, data):\n '''The form action for the *Get groups* page.\n\n:param action: The button that was clicked.\n:param dict data: The form data.'''\n retval = to_json(self.groups)\n return retval\n\n def handle_get_failure(self, action, data, errors):\n log_auth_error(self.context, self.request, errors)\n retval = self.build_error_response(action, data, errors)\n return retval\n\n @staticmethod\n def group_to_dict(groupInfo):\n groupUnset = IUnsetType(groupInfo.groupObj)\n groupVisiblity = IGSGroupVisibility(groupInfo)\n try:\n l = IGSMailingListInfo(groupInfo.groupObj)\n except AttributeError as ae:\n raise NoList(ae)\n\n retval = OrderedDict((\n ('id', groupInfo.id),\n ('name', groupInfo.name),\n ('url', groupInfo.url),\n ('email', l.get_property('mailto')),\n ('type', groupUnset.name),\n ('privacy', groupVisiblity.visibility), ))\n return retval\n\n @Lazy\n def groups(self):\n '''All the members of the GroupServer instance.'''\n retval = []\n for groupInfo in AllGroupsOnSite(self.context):\n try:\n r = self.group_to_dict(groupInfo)\n except NoList:\n continue\n retval.append(r)\n return 
retval\n","repo_name":"groupserver/gs.group.groups.json","sub_path":"gs/group/groups/json/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"36820537579","text":"def iscons(a):\n vowels = ('a','e','i','o','u')\n a = a.lower()\n if a >= 'a' and a <= 'z':\n if a in vowels:\n return False\n else:\n return True\n\n#pr=input()\n\ndef swap(h):\n l=h.split(\" \")\n l.reverse()\n final=\"\"+l[0]+\" \"+l[-1]\n return final\n\nprint(swap(\"Cristiano Ronaldo\"))\n\n\ndef grades_frequency(grades):\n ngrades = len(grades)\n freq = dict()\n for grade in range(21):\n freqgrade = grades.count(grade)\n if freqgrade > 0:\n freq[grade] = round(100 * freqgrade / ngrades , 1)\n return freq\n\ngrades = [10, 9, 10, 12, 10, 9, 11, 10, 12, 13]\nprint(grades_frequency(grades))","repo_name":"PedroCorreia56/IADP","sub_path":"Lesson13/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"3992024972","text":"\"\"\"Application of the MLMC to a unidimensional Lévy process with the CTMC scheme.\n\"\"\"\n\nfrom itertools import product\nimport math\n\nfrom scripts.mlmc.tools.utils import *\n\nfrom rpylib.distribution.sampling import SamplingMethod\nfrom rpylib.model.levycopulamodel import LevyCopulaModel\nfrom rpylib.montecarlo.configuration import ConfigurationMultiLevel\nfrom rpylib.montecarlo.configuration import compute_convergence_rates\nfrom rpylib.montecarlo.multilevel.criteria import GilesConvergenceCriteria\nfrom rpylib.montecarlo.multilevel.engine import Engine\nfrom rpylib.montecarlo.statistic.statistic import MLMCStatistics\nfrom rpylib.process.markovchain.markovchainlevycopula import vol_adjustment_ij\nfrom rpylib.process.coupling.couplinglevycopula import CouplingProcessLevyCopula\nfrom rpylib.tools.timer import timer\n\n\ndef compute_max_level_copula(\n levy_copula_model: LevyCopulaModel, h0: float, maturity: float, rmse: float\n) -> int:\n dimension = levy_copula_model.dimension()\n bg = levy_copula_model.blumenthal_getoor_index()\n mass = levy_copula_model.mass\n\n def int_xx(h):\n res = sum(\n vol_adjustment_ij(i, i, h=2 * h, levy_model=levy_copula_model)\n for i in range(dimension)\n )\n return res\n\n def fun1(h):\n a = [-h / 2] * dimension\n b = [+h / 2] * dimension\n intervals = [\n [[h_l, h_r], [-np.inf, h_l], [h_r, np.inf]] for h_l, h_r in zip(a, b)\n ]\n cartesian_product = product(*intervals)\n # discard first set which is [h_l1, h_r1]x[h_l2, h_r2]x...x[h_ln, h_rn]\n # we are calculating the measure on the complement of this very set\n next(cartesian_product)\n\n res = 0\n for c_set in cartesian_product:\n a, b = zip(*c_set)\n res += mass(a=a, b=b)\n return res\n\n def bound_fun(h):\n x1 = (dimension * h**2) * fun1(h)\n x2 = int_xx(h)\n return max(x1, x2)\n\n hl = [h0 / 2**level for level in range(5, 15)]\n log_h = np.log(np.array(hl))\n log_bounds = np.log(np.array([bound_fun(h) for h in hl]))\n logNcts = max(log_bounds - (2 - bg) * log_h)\n logDZero = np.log(8) + logNcts\n logDB = np.log(max(maturity, 1) * dimension) + logDZero\n two_minus_bg = 2 - bg\n\n val = math.floor(\n (np.log(3) + two_minus_bg * np.log(h0) + logDB - 2 * np.log(rmse))\n / (two_minus_bg * np.log(2))\n )\n return val\n\n\ndef helper_coupling_copula(\n rmse: float, grid: CTMCGrid, levy_copula_model: LevyCopulaModel, product: Product\n) -> MLMCStatistics:\n method = 
SamplingMethod.BINARYSEARCHTREEADAPTED\n maximum_level = compute_max_level_copula(\n levy_copula_model=levy_copula_model,\n h0=grid.h,\n maturity=product.maturity,\n rmse=rmse,\n )\n maximum_level = min(20, maximum_level)\n\n cr = compute_convergence_rates(levy_copula_model.blumenthal_getoor_index())\n coupling_process = CouplingProcessLevyCopula(\n levy_copula_model=levy_copula_model, grid=grid, method=method\n )\n criteria = GilesConvergenceCriteria()\n configuration = ConfigurationMultiLevel(\n convergence_rates=cr, convergence_criteria=criteria, maximum_level=maximum_level\n )\n mc_engine = Engine(configuration=configuration, coupling_process=coupling_process)\n result = mc_engine.price(product, rmse)\n\n return result\n\n\n@timer\ndef coupling_copula_cost_and_levels(name: str, rmses: list[float]) -> None:\n root_path = Path(__file__).cwd().parent\n grid, model, product = helper_data(name, \"copulas\", root_path)\n beta = model.blumenthal_getoor_index()\n rmses = np.array(rmses)\n outputs = [helper_coupling_copula(rmse, grid, model, product) for rmse in rmses]\n root_path_results = Path(Path().cwd().parent, \"results/giles_applied/copulas\")\n save_mlmc_coupling_applied_results(rmses, outputs, root_path_results, name, beta)\n\n\nif __name__ == \"__main__\":\n my_name = \"hem_cgmy02\"\n my_rmses = [0.2, 0.1]\n\n # note that you must run the `mlmc_convergence_copula` script first with the same model\n coupling_copula_cost_and_levels(name=my_name, rmses=my_rmses)\n","repo_name":"rpalfray/rpylib","sub_path":"scripts/mlmc/application/mlmc_applied_to_copula.py","file_name":"mlmc_applied_to_copula.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"23"} +{"seq_id":"24004417166","text":"import socket\n\nSERVER_ADDRESS = \"127.0.0.1\"\nSERVER_PORT = 1234\n\n# initializing a datagram socket\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# binding the server\nserver.bind((SERVER_ADDRESS, SERVER_PORT))\n\n# receiving data from a client\ndata, addr = server.recvfrom(10)\nprint(data.decode(), \" received from \", addr)\n\n# sending data back to the client\nstring = 'world'\nstring = string.encode()\n\n# addr = address of the sender\nserver.sendto(string, addr)","repo_name":"culbec/retele_templates","sub_path":"udp/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"18244898425","text":"#!/usr/bin/python3\nimport os, sys, stat\n\nimport subprocess\nimport traceback\nimport pprint\nimport curses\nimport signal\n\nimport secrets\nimport base64\nimport shlex\nimport signal\nimport select\nimport socket\n\nimport argparse\n\ntry:\n import argcomplete\nexcept:\n pass\n\nimport configparser\n\nimport threading\nimport queue\nimport time\nimport datetime\n\nimport logging\n\nconfig = configparser.ConfigParser()\nconfig.read(( os.path.join(os.path.dirname(sys.argv[0]), 'escp.conf'),\n '/usr/local/etc/escp.conf', '/etc/escp.conf',\n 'escp.conf' ))\n\nESCP_VERSION = \"NA\"\nLICENSE = \"\"\"\nESnet Secure Copy (EScp) Copyright (c) 2021, The Regents of the\nUniversity of California, through Lawrence Berkeley National Laboratory\n(subject to receipt of any required approvals from the U.S. Dept. of\nEnergy). 
All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n(1) Redistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\n(2) Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n(3) Neither the name of the University of California, Lawrence Berkeley\nNational Laboratory, U.S. Dept. of Energy nor the names of its contributors\nmay be used to endorse or promote products derived from this software\nwithout specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\nYou are under no obligation whatsoever to provide any bug fixes, patches,\nor upgrades to the features, functionality or performance of the source\ncode (\"Enhancements\") to anyone; however, if you choose to make your\nEnhancements available either publicly, or directly to Lawrence Berkeley\nNational Laboratory, without imposing a separate written license agreement\nfor such Enhancements, then you hereby grant the following license: a\nnon-exclusive, royalty-free perpetual license to install, use, modify,\nprepare derivative works, incorporate into other computer software,\ndistribute, and sublicense such enhancements or derivative works thereof,\nin binary and source code form.\n\"\"\"\n\ntry:\n ESCP_VERSION = config[\"escp\"][\"VERSION\"]\nexcept:\n pass\n\ndef human_readable(number, figs, bits=False):\n si_prefix = \" KMGTPE\"\n\n divisor = 10000\n if figs < 4:\n divisor = 1000\n\n if not bits:\n while (number / divisor) > 1:\n number /= 1024;\n si_prefix = si_prefix[1:]\n else:\n number *= 8\n while (number > 1000):\n number /= 1000\n si_prefix = si_prefix[1:]\n\n\n if number < 1:\n number = 1\n\n sig_figs = len(str(int(number)))\n fraction = figs-sig_figs\n if fraction < 0:\n fraction = 0\n\n return \"%*.*f%s\" % (sig_figs, fraction, number, si_prefix[:1])\n\ndef show_progress( number, start_time, window, file_name, file_total=False, bits=False ):\n try:\n bites = int(number)\n y,x = window.getmaxyx()\n\n fi = \", \".join(file_name)\n if len(fi)+30>x:\n fi = fi[:x-30] + \"...\"\n fill = x - len(fi)\n\n rate = bites/(time.time() - start_time)\n\n if file_total:\n bytes_left = file_total - bites\n\n if bytes_left < 0:\n bytes_left = 0;\n\n eta = bytes_left/rate\n eta = int(eta)\n\n if eta > 1:\n delta = datetime.timedelta(seconds=eta)\n eta = str(datetime.datetime.strptime(str(delta), \"%H:%M:%S\")).split()[1]\n eta = \"%s ETA\" % eta\n else:\n eta = \"\"\n progress = \"%2.0f%% %sB %sB/s %s\" % (\n (bites/file_total)*100,\n human_readable(bites, 4),\n 
human_readable(rate, 2, bits),\n eta )\n else:\n\n units = \"b\" if bits else \"B\"\n\n progress = \"%sB %s%s/s\" % (\n human_readable(bites, 4),\n human_readable(rate, 2, bits),\n units\n )\n\n\n sys.stdout.write(\"\\r%s%*s\" % ( fi, fill, progress ) )\n except Exception as e:\n logging.debug(\"show_progress got an error: %s\", e)\n raise(e)\n pass\n\ndef handler_ctrlc(signal, frame):\n print(\"\\n\\rInterrupt/Ctrl-C, exiting...\")\n sys.exit(1)\n\ndef stream_write( stream, data ):\n\n if not data:\n return\n\n if isinstance( data, str ):\n data = [data,]\n\n if not isinstance( data, list ):\n raise ValueError(\"data must be a list\")\n\n for i in data:\n if isinstance( i, list ):\n stream.stdin.write( str.encode(\"%d\\n\" % len(i)) )\n stream.stdin.write( str.encode(\"%s\\n\" % \"\\n\".join(i)) )\n else:\n stream.stdin.write( str.encode(\"%s\\n\" % i) )\n\n stream.stdin.flush()\n\n\ndef stream_read( queue, data ):\n if data == None:\n return\n\n if not isinstance( data, str ) and not isinstance( data, list ):\n raise ValueError(\"data must be a string/list\")\n\n res = queue.get()\n\n if ( isinstance(data, list) ) and res[0] in data:\n return res[0]\n\n if res[0] != data:\n #raise ValueError(\"Stream Read Error\")\n print(\"Stream Read Error: '%s'\" % \" \".join(res[1:]) )\n print(\"actual='%s' != expected='%s'\" % (res[0], data) )\n sys.exit(1)\n\n return res[0]\n\n\ndef mgmt_reader( stream, stat_queue, mgmt_queue, name ):\n logging.debug(\"start mgmt_reader '%s'\" % name )\n while 1:\n line = stream.stdout.readline()\n line = line.decode(\"utf-8\")\n line = line.strip(\"\\n\")\n\n logging.debug(\"mgmt_reader '%s' got: %s\" % ( name, line ) )\n\n if not line:\n logging.debug(\"mgmt_reader '%s': connection terminated \" % name )\n stat_queue.put(\"ABORT\")\n stat_queue.put(\"Session terminated early\")\n mgmt_queue.put((\"ABORT\", \"Session terminated\"));\n return;\n\n if line in (\"OKAY\", \"REDY\", \"FILE\", \"CHDR\"):\n logging.debug(\"mgmt_reader '%s': %s\" % (name, line) )\n mgmt_queue.put((line,))\n continue\n\n if line == \"SESS\":\n line = stream.stdout.readline().decode(\"utf-8\")\n logging.debug(\"mgmt_reader '%s': SESS %s\" % (name, line) )\n #mgmt_queue.put((\"SESS\",line))\n continue\n\n if line == \"SHM\":\n line = stream.stdout.readline().decode(\"utf-8\")\n line = line.strip(\"\\n\")\n logging.debug(\"mgmt_reader '%s': SHM %s\" % (name, line) )\n mgmt_queue.put((\"SHM\",line))\n continue\n\n if line == \"STAT\":\n line = stream.stdout.readline().decode(\"utf-8\")\n line = line.strip(\"\\n\")\n logging.debug(\"mgmt_reader '%s': STAT %s \" % (name, line) )\n stat_queue.put(\"STAT\")\n stat_queue.put(int(line))\n continue\n\n if line == \"ABRT\":\n line = stream.stdout.readline().decode(\"utf-8\")\n logging.debug(\"mgmt_reader '%s': ABRT %s \" % (name, line) )\n stat_queue.put(\"ABORT\")\n stat_queue.put(line)\n mgmt_queue.put((\"ABORT\",line))\n return;\n\n if line == \"FTOT\":\n logging.debug(\"FTOT readline\")\n line = stream.stdout.readline().decode(\"utf-8\")\n line = line.strip(\"\\n\")\n count, bites= map( lambda x: int(x), line.split( \" \", maxsplit=1 ) )\n logging.debug(\"mgmt_reader '%s': FTOT fi=%d bytes=%d\" %\n (name, count, bites))\n mgmt_queue.put( (count, bites) )\n continue\n\n\n if line == \"OPEN\":\n line = stream.stdout.readline().decode(\"utf-8\")\n line = line.strip(\"\\n\")\n no, fi = line.split( \" \", maxsplit=1 )\n logging.debug(\"mgmt_reader '%s': OPEN %s %d\" % (name, fi, int(no)) )\n stat_queue.put( 'OPEN' )\n stat_queue.put( (no, fi) )\n 
continue\n\n if line == \"STOP\":\n line = stream.stdout.readline().decode(\"utf-8\")\n line = line.strip(\"\\n\")\n no, fi = line.split( \" \", maxsplit=1 )\n logging.debug(\"mgmt_reader '%s': CLOSE %s %d\" % (name, fi, int(no)) )\n stat_queue.put( 'STOP' )\n stat_queue.put( (no, fi) )\n continue\n\n if line == \"XIT\":\n logging.debug(\"mgmt_reader '%s': Exit successfully\" % name )\n stat_queue.put( 'EXIT' )\n mgmt_queue.put( 'EXIT' )\n return\n\n logging.debug(\"mgmt_reader '%s': not recognized '%s'\" % (name, line))\n print (\"Not recognized '%s'\" % line)\n\n\ndef progress_bar( rx_queue, tx_queue, bits=False ):\n file_count = 0\n file_completed = 0\n\n bytes_total = 0\n\n file_open = set()\n\n logging.debug(\"start progress_bar\")\n\n start_time = time.time()\n error = \"\"\n\n msg = \"Exited normally\"\n exit_count=0\n\n try:\n window = curses.initscr()\n\n while exit_count < 2:\n\n got_results = False\n\n try:\n res=rx_queue.get_nowait()\n got_results = True\n if res == \"EXIT\":\n logging.debug(\"Receiver exiting successfully\")\n exit_count+=1\n continue\n if res == \"ABORT\":\n logging.debug(\"RX got ABORT\");\n msg = rx_queue.get()\n error += \"RX %s\" % msg\n logging.info(\"RX Error: %s\" % (error) )\n exit_count+=2\n\n continue\n msg = rx_queue.get()\n logging.debug(\"RX ignoring %s: %s\" % ( res, msg ) )\n except queue.Empty:\n logging.debug(\"RX queue empty\")\n pass\n except Exception as e:\n raise ValueError(\"Unexpected exception %s\" % type(e))\n break\n\n try:\n res=tx_queue.get_nowait()\n got_results = True\n\n if res == \"EXIT\":\n logging.debug(\"Receiver exiting successfully\")\n exit_count+=1\n\n continue\n\n if res == \"ABORT\":\n logging.debug(\"TX got ABORT\");\n msg = tx_queue.get()\n error += \"TX %s\" % msg\n logging.info(\"TX Error: %s\" % error)\n exit_count+=2\n\n continue\n\n msg = tx_queue.get()\n\n if res == \"OPEN\":\n number, fn = msg\n file_count += 1\n file_open.add(fn)\n logging.debug(\"TX got OPEN on %s\" % fn);\n continue\n\n if res == \"STOP\":\n number, file_name = msg\n file_completed += 1\n file_open.remove(file_name)\n logging.debug(\"TX got STOP on %s\" % file_name);\n continue\n\n if res == \"STAT\":\n m = str(msg)\n logging.debug(\"TX got STAT %s\" % m);\n show_progress(msg, start_time, window, file_open, bits=bits)\n continue\n\n logging.debug(\"TX got Unknown operator '%s' %s\" % (res, msg) )\n except queue.Empty:\n logging.debug(\"TX queue mt\")\n pass\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)\n raise(e)\n break\n\n if not got_results:\n time.sleep(0.1)\n logging.debug(\"looping\")\n finally:\n curses.endwin()\n\n logging.debug(\"Progress bar is finished: '%s'\" % (error) )\n\n if error:\n print( \"\\nTransfer terminated: \", error )\n logging.debug(\"System terminate!\")\n sys.exit(1)\n\ndef file_recurse( self, files, path=None ):\n\n total = 0\n\n file_list = []\n dir_list = []\n flush = 15\n\n logging.debug(\"file_recurse: path=%s\", path)\n\n for fi in files:\n if path:\n fi = os.path.join( path, fi )\n fi_stat = os.stat(fi)\n if stat.S_ISDIR(fi_stat.st_mode):\n dir_list.append(fi)\n continue\n\n if not stat.S_ISREG(fi_stat.st_mode):\n logging.debug(\"Skipping %s, it is neither a file nor directory\" % fi)\n\n total += fi_stat.st_size\n file_list.append(fi)\n flush += 1\n\n if flush > 20:\n self.push_tx( [\"FILE\", file_list], \"OKAY\" )\n flush=0\n file_list=[]\n pass\n\n if file_list:\n self.push_tx( [\"FILE\", file_list], \"OKAY\" 
)\n\n for d in dir_list:\n total += file_recurse( self, os.listdir(d), path=d )\n\n return total\n\n\ndef run_transfer( self ):\n self.push_tx( [\"STAT\"], \"OKAY\" )\n\n total = file_recurse( self, self.args.files[:-1] )\n logging.debug(\"File total: %d\", total)\n\n try:\n self.push_tx( [\"DONE\"], \"OKAY\")\n print (\"\")\n except:\n print(\"Transfer failed.\")\n res = self.rx_mgmt.get_nowait()\n if res != \"EXIT\":\n print (\"Error: \", res)\n\nclass EScp:\n def push_rx( self, option, response=None ):\n stream_write( self.rx_cmd, option )\n return stream_read( self.rx_mgmt, response )\n\n def push_tx( self, option, response=None ):\n stream_write( self.tx_cmd, option )\n return stream_read( self.tx_mgmt, response )\n\n def parseArgs(self, args=sys.argv[1:]):\n parser = argparse.ArgumentParser(\n description='EScp: Secure Network Transfer',\n fromfile_prefix_chars='@')\n\n parser.add_argument('files', metavar='FILE', nargs='*',\n help='[SRC] ... [DST], where DST is HOST:PATH')\n\n parser.add_argument('-P','--port', metavar='PORT',\n help=\"Port for SSH[/DTN]\" )\n parser.add_argument('-q','--quiet', action='store_const', const=1)\n parser.add_argument('-v','--verbose', action='store_const', const=1)\n parser.add_argument('-l','--license', action='store_const', const=1)\n\n parser.add_argument('--bits', action='store_const', const=1,\n help=\"Show progress in bits/s\")\n parser.add_argument('--direct', action='store_const', const=1,\n help=\"Enable direct I/O\")\n\n parser.add_argument('--args_dst', metavar='ARG', type=str,\n help=\"Arguments to DST DTN Executable\")\n parser.add_argument('--args_src', metavar='ARG', type=str,\n help=\"Arguments to SRC DTN Executable\")\n parser.add_argument('--path_dst', metavar='PATH', type=str,\n help=\"Path to DST DTN Executable\")\n parser.add_argument('--path_src', metavar='PATH', type=str,\n help=\"Path to SRC DTN Executable\")\n parser.add_argument('--version', action='store_const', const=1)\n\n try:\n argcomplete.autocomplete(parser)\n except:\n pass\n\n args = parser.parse_args(args)\n\n if args.license:\n print (LICENSE)\n sys.exit(0)\n\n if args.version:\n print (\"EScp: %s\" % ESCP_VERSION )\n dtn = \"dtn\"\n try:\n dtn = config[\"escp\"][\"dtn_path\"]\n except:\n pass\n s=subprocess.run([dtn, \"--version\"], capture_output=True)\n s = s.stdout.decode(\"utf-8\").strip()\n print (\"DTN: %s\" % s)\n sys.exit(0)\n\n if not args.files or (len(args.files) < 2):\n print (\"both SRC and DST must be specified\")\n sys.exit(1)\n\n for i in args.files[:-1]:\n if not os.path.exists(i):\n print(\"Source file '%s' not found\" % i)\n sys.exit(-1)\n\n self.args = args\n\n def applyArgs(self):\n ssh_host = False\n dst_host = \"localhost\"\n\n ssh_opts = \"\"\n ssh_port = False\n dtn_port = 2222\n\n if self.args.port:\n try:\n ssh_port, dtn_port = map( int, self.args.port.split(\"/\"))\n except:\n ssh_port = int( self.args.port )\n\n if ssh_port:\n ssh_opts += \"-p %d\" % ssh_port\n\n try:\n ssh_host, dst_path = self.args.files[-1].split(\":\")\n parts = ssh_host.split(\"@\")\n if len(parts) > 1:\n dst_user, dst_host = parts\n else:\n dst_host = ssh_host\n except:\n dst_path = self.args.files[-1]\n\n if (self.args.path_dst):\n remote_dtn = self.args.path_dst\n else:\n try:\n remote_dtn = config[dst_host][\"dtn_path\"]\n except:\n remote_dtn = \"dtn\"\n\n if (self.args.path_src):\n local_dtn = self.args.path_src\n else:\n try:\n local_dtn = config[\"escp\"][\"dtn_path\"]\n except:\n local_dtn = \"dtn\"\n\n\n args_dst = []\n\n try:\n args_dst += 
shlex.split(config[dst_host][\"dtn_args\"])\n except:\n pass\n\n args_dst += [ \"-s\", \"--managed\" ]\n\n if (self.args.args_dst):\n args_dst += shlex.split(self.args.args_dst)\n\n args_src = [ local_dtn, ]\n\n try:\n args_src += shlex.split(config[\"escp\"][\"dtn_args\"])\n except:\n pass\n\n \"\"\"\n This is our host logic:\n 1) ssh_host is always whatever is specified in command line, this is\n so that the ssh_host you use should match your known_hosts file.\n 2) We then convert the host (or IP) into an IP; This is used for DTN\n 3) DTN receiver will listen on this interface\n 4) DTN sender will connect to listed IP\n \"\"\"\n\n ip_addr = socket.getaddrinfo(dst_host, None)[0][4][0]\n\n\n args_src += [\"--managed\", \"-c %s/%d\" % (ip_addr, dtn_port)]\n args_dst += [\"-c\", \"%s/%d\" % (ip_addr, dtn_port) ]\n\n if not self.args.direct:\n args_src += [\"--nodirect\",]\n args_dst += [\"--nodirect\",]\n\n if self.args.verbose:\n args_src += [ \"--verbose\", \"--logfile\", \"/tmp/dtn.tx.log\" ]\n args_dst += [ \"--verbose\", \"--logfile\", \"/tmp/dtn.rx.log\" ]\n logging.basicConfig(filename='/tmp/escp.log', level=logging.DEBUG)\n\n logging.info(\"Starting EScp: %s\" % ESCP_VERSION)\n\n if (self.args.args_src):\n args_src += shlex.split(self.args.args_src);\n\n sekret = secrets.token_bytes(16)\n sekret = base64.b64encode(sekret)\n sekret = sekret.decode(\"utf-8\")\n sekret = sekret.replace(\"=\", \"\")\n\n if ssh_host:\n ssh_args = [ \"ssh\", ssh_host, remote_dtn ]\n else:\n ssh_args = [ local_dtn, ]\n\n ssh_args += args_dst\n\n\n if len(ssh_opts):\n ssh_args.insert(1, ssh_opts)\n\n if (self.args.verbose):\n print(\"local_dtn: %s, remote_dtn: %s\" % (local_dtn, remote_dtn) )\n print(\"dst_host: %s, dst_path: %s\" % ( dst_host, dst_path ))\n print(\"Auth secret = ...%s\" % sekret[-4:])\n print(\"SSH command = '%s'\" % \" \".join(ssh_args))\n print(\"Local command = '%s'\" % \" \".join(args_src))\n\n if not dst_path:\n dst_path=\".\"\n\n self.dst_path = dst_path\n self.receiver_args = ssh_args\n self.sender_args = args_src\n\n self.sekret = sekret\n\n def connect(self):\n self.rx_cmd = subprocess.Popen( self.receiver_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE )\n self.rx_mgmt = queue.Queue()\n self.rx_stat = queue.Queue()\n\n self.rx_thread = threading.Thread(target=mgmt_reader,\n args=(self.rx_cmd, self.rx_stat, self.rx_mgmt, \"RX\"), daemon=True)\n self.rx_thread.start()\n\n try:\n self.push_rx( None, \"REDY\" )\n except:\n print(\"Error connecting to host: \", self.rx_cmd.stderr.read().decode());\n sys.exit(0)\n\n self.push_rx( [\"HASH\"], \"OKAY\" )\n self.push_rx( [\"CKEY\", self.sekret], \"OKAY\" )\n\n #self.push_rx( [\"FILE\", self.dst_files], \"OKAY\" )\n if self.dst_path:\n res = self.push_rx( ['CHDR', self.dst_path], [\"CHDR\",\"FILE\"] )\n if res == \"FILE\":\n if len(self.args.files[:-1]) != 1:\n self.push_rx( [\"EXIT\"], \"OKAY\" );\n print(\"target '%s' is not a directory\" % self.dst_path)\n sys.exit(0)\n self.push_rx( [\"FILE\", [self.args.files[1]]], \"OKAY\" )\n\n self.push_rx( [\"RECV\"], \"OKAY\" )\n\n self.tx_cmd = subprocess.Popen( self.sender_args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE )\n\n self.tx_mgmt = queue.Queue()\n self.tx_stat = queue.Queue()\n\n self.tx_thread = threading.Thread(target=mgmt_reader,\n args=(self.tx_cmd, self.tx_stat, self.tx_mgmt, \"TX\"), daemon=True)\n self.tx_thread.start()\n\n self.push_tx( None, \"REDY\")\n self.push_tx( [\"HASH\"], \"OKAY\" )\n self.push_tx( [\"CKEY\", 
self.sekret], \"OKAY\" )\n\n del self.sekret\n self.sekret = \"Meow! I'm a cat!\"\n\n #self.push_tx( [\"FILE\", self.src_files], \"OKAY\" )\n self.push_tx( [\"PERS\"], \"OKAY\" )\n self.push_tx( [\"SEND\"], \"OKAY\" )\n\n self.m_thread = \\\n threading.Thread(target=run_transfer, args=(self,), daemon=True)\n self.m_thread.start()\n\n # time.sleep(0.1)\n progress_bar( self.rx_stat, self.tx_stat, self.args.bits )\n\n self.m_thread.join()\n\n\n #self.push_rx( [\"DONE\"], \"OKAY\" )\n #print (\"Finished assigning options\")\n\n\n def __init__(self, doInit=True):\n if doInit:\n self.parseArgs()\n self.applyArgs()\n\nif __name__ == \"__main__\":\n\n signal.signal(signal.SIGINT, handler_ctrlc)\n\n escp = EScp()\n\n try:\n escp.connect()\n except Exception as e:\n print(\"Error: \", e)\n sys.exit(1)\n\n","repo_name":"esnet/EScp","sub_path":"scripts/escp.py","file_name":"escp.py","file_ext":"py","file_size_in_byte":20456,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"23"} +{"seq_id":"38119274958","text":"print(\"BEGIN ...\")\nimport time\n\ndense_layers = [0,1,2]\nlayer_sizes = [32,64,128]\nconv_layers = [1,2,3]\n\n\nimport time\nimport tensorflow as tf\nprint(\"DEBUG 0 ...\")\nfrom keras.datasets import cifar10\nprint(\"DEBUG 1 ...\")\nfrom keras.preprocessing.image import ImageDataGenerator\nprint(\"DEBUG 2 ...\")\nfrom keras.models import Sequential\nprint(\"DEBUG 3 ...\")\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nprint(\"DEBUG 4 ...\")\nfrom keras.callbacks import TensorBoard\nprint(\"DEBUG 5 ...\")\nimport pickle\n\nNAME = \"Cats-vs-Dogs-cnn-64x2_{}\".format(int(time.time()))\n\nprint(\"LOADING DATAS...\")\n\nsess = tf.Session(config=tf.ConfigProto())\n\nX = pickle.load(open(\"../02/X.pickle\", \"rb\"))\ny = pickle.load(open(\"../02/y.pickle\", \"rb\"))\n\n# resize images to 0-1\nX = X/255.0\n\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n print(\"DEBUG LOOP ...\")\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n NAME = \"{}___{}_conv-{}_nodes-{}_dense\".format(int(time.time()), conv_layer, layer_size, dense_layer)\n print(NAME)\n\n model = Sequential()\n\n model.add( Conv2D(64, (3,3), input_shape = X[0].shape) )\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2,2)))\n\n for l in range(conv_layer-1): # -1 bcs we already the one beside\n model.add( Conv2D(64, (3,3)) )\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Flatten())\n\n for l in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation(\"relu\"))\n\n model.add(Dense(1))\n model.add(Activation(\"relu\"))\n\n model.compile(\n loss=\"binary_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"])\n\n model.fit(X, y, batch_size=32, epochs=10, validation_split=0.1, callbacks=[tensorboard])\n","repo_name":"boehm-e/learn_keras_tf","sub_path":"05_tensorboard_compare/model_compare.py","file_name":"model_compare.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"29039548321","text":"from xml.etree import ElementTree as ET\n\nfrom jenkins import Jenkins\nfrom jenkins import EMPTY_CONFIG_XML\n\nfrom buttler.config import get_config\n\n\nclass JenkinsWrapper:\n def __init__(self):\n config = get_config()\n self.server = Jenkins(\n config.jenkins_url,\n username=config.jenkins_user,\n 
password=config.jenkins_password)\n # DON'T REMOVE: workaround to await connection\n self.server.get_info()\n\n def print_version(self):\n version = self.server.get_version()\n print(f\"Jenkins is on version {version}\")\n\n def create_list_view(self, name: str):\n config_xml = \"\"\"\n \n \n \n \"\"\"\n\n if self._exists_view(name):\n print(\"View exists already. Nothing was done.\")\n else:\n self.server.create_view(name, config_xml)\n print(f\"View {name} was created successfully.\")\n\n def create_job(self, name: str, view: str):\n if self._exists_job(name):\n print(f\"Job {name} already exists. Nothing was done.\")\n return\n if not self._exists_view(view):\n self.create_list_view(view)\n self.server.create_job(name, EMPTY_CONFIG_XML)\n print(f\"Job {name} was created successfully.\")\n self._add_job_to_view(name, view)\n print(f\"Job has been added to the view {view}\")\n\n def _exists_job(self, name: str) -> bool:\n jobs = [job.get(\"name\") for job in self.server.get_jobs()\n if job.get(\"name\")]\n return name in jobs\n\n def _exists_view(self, name: str) -> bool:\n views = [view.get(\"name\") for view in self.server.get_views()\n if view.get(\"name\")]\n return name in views\n\n def _add_job_to_view(self, job_name, view_name):\n new_job = \"\"\"\n %s\n \"\"\", job_name\n config = self.server.get_view_config(view_name)\n tree = ET.ElementTree(ET.fromstring(config))\n old_jobs_node = tree.find(\".//jobNames\")\n # append job node\n new_job = ET.SubElement(old_jobs_node, 'string')\n new_job.text = job_name\n # get string from tree\n config_xml = ET.tostring(tree.getroot()).decode('utf-8')\n # update config\n self.server.reconfig_view(view_name, config_xml)\n","repo_name":"busykoala/setup-butler","sub_path":"buttler/jenkins_wrapper.py","file_name":"jenkins_wrapper.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"25699862206","text":"# import jax\n# jax.config.update(\"jax_enable_x64\", True)\n\nimport numpy as np\nimport jax.numpy as jnp\nimport pickle \n\nimport function.config as func_config\nfrom function.helper_functions import construction_Zred_Fred_dZreddw_dFreddw\nfrom function.target_function import target_function_maker\nfrom function.domain_sampler import domain_sampler\n\nfrom optimizer.optimize import optimize\nimport optimizer.config as opt_config\n\nfrom logger.visualize import visualize\n\nimport time\nfrom datetime import timedelta\n\nif __name__ == '__main__':\n\n if opt_config.checkpoint_path is not None:\n checkpoint = pickle.load(opt_config.checkpoint_path)\n omega_start = checkpoint['omega_start']\n omega_end = checkpoint['omega_end']\n omega_steps = checkpoint['omega_steps']\n solutions_storage = checkpoint['solutions_storage']\n else:\n omega_start = func_config.omegas_range[0]\n omega_end = func_config.omegas_range[1]\n omega_steps = func_config.omegas_steps\n solutions_storage = []\n \n global_start_time = time.time()\n\n for omega_idx, omega in enumerate(np.linspace(omega_start, omega_end, omega_steps)):\n\n omega_start_time = time.time()\n\n Z, Fh = construction_Zred_Fred_dZreddw_dFreddw(omega,func_config.M,func_config.C,func_config.K,func_config.F,func_config.Nh,func_config.beta,func_config.gamma,func_config.ddl_ln,func_config.ddl_nl,func_config.derivatives,func_config.penalite,func_config.nu)\n\n target_function_hparams = {'Z':Z, 'Fh':Fh, 'nb_dims':2*func_config.Nh+1}\n target_function = target_function_maker(target_function_hparams)\n\n 
optimization_hparams = {'nb_points':opt_config.nb_points,\n 'iterations':opt_config.iterations,\n 'kept_threshold':opt_config.kept_threshold,\n 'merge_threshold':opt_config.merge_threshold,\n 'max_history':opt_config.max_history,\n 'seed':omega_idx}\n\n solutions, loss = optimize(target_function, target_function_hparams, domain_sampler, optimization_hparams)\n if solutions is not None:\n solutions_storage.append([omega, solutions])\n checkpoint = {'omega_start':omega+(func_config.omegas_range[1]-func_config.omegas_range[0])/func_config.omegas_steps,\n 'omega_end':func_config.omegas_range[1],\n 'omega_steps':func_config.omegas_steps-omega_idx-1,\n 'solutions_storage':solutions_storage}\n with open('./checkpoint/checkpoint.pt', 'wb') as file:\n pickle.dump(checkpoint, file)\n print(\"For omega =\", omega, \",\", solutions.shape[0], \"solutions were found.\")\n print(\"With losses :\", loss)\n else:\n print(\"For omega =\", omega, \",\", \"no solutions were found.\")\n print(\"Best loss reached :\", loss)\n\n omega_end_time = time.time()\n \n time_since_global_start = omega_end_time - global_start_time\n print(\"Time elapsed since beginning :\", str(timedelta(seconds=time_since_global_start)))\n current_omega_duration = omega_end_time-omega_start_time\n print(\"Current omega optimization duration :\", str(timedelta(seconds=current_omega_duration)))\n if omega_idx == 0:\n avg_omega_duration = current_omega_duration\n else:\n avg_omega_duration = (omega_idx * avg_omega_duration + current_omega_duration)/(omega_idx+1)\n print(\"Expected remaining time :\", str(timedelta(seconds=avg_omega_duration*(omega_steps-omega_idx-1))))\n print(\"------------\")\n\n\n amps_lin = []\n for omega, solutions in solutions_storage:\n solutions_amps_lin = []\n for solution_idx in range(solutions.shape[0]):\n solution = solutions[solution_idx]\n x_t = func_config.IDFT_1ddl@solution\n solutions_amps_lin.append(jnp.abs(x_t).max())\n amps_lin.append([omega, solutions_amps_lin])\n\n visualize(amps_lin)\n\n","repo_name":"liris-tduboudin/Optimizator","sub_path":"optimizator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"181142411","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#author : zhangdawang\n#data: 2018-3\n#difficulty degree:\n#problem: 784_Letter_Case_Permutation.py\n#time_complecity: \n#space_complecity: \n#beats: \n\nclass Solution(object):\n #dfs,若碰到字母,则大小写分别dfs,若为数字,直接向下DFS\n def letterCasePermutation(self, S):\n def dfs(i, tmp):\n if i == n:\n res.append(tmp)\n return\n if S[i].isalpha():\n dfs(i + 1, tmp + S[i].lower())\n dfs(i + 1, tmp + S[i].upper())\n else:\n dfs(i + 1, tmp + S[i])\n\n res = []\n n = len(S)\n dfs(0, \"\")\n return res","repo_name":"ZDawang/leetcode","sub_path":"784_Letter_Case_Permutation.py","file_name":"784_Letter_Case_Permutation.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"23"} +{"seq_id":"26085783148","text":"from turtle import Turtle\nimport time\nclass Bullet(Turtle):\n def __init__(self, wn, xi, yi, speed, direction):\n super().__init__()\n self.ht()\n self.wn = wn\n self.goto(xi, yi)\n self.st()\n self.speed = speed\n self.setheading(direction)\n \n def run():\n 
self.forward(self.speed)\n","repo_name":"cswonders/turtle-shooting-game","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"35420336002","text":"from PIL import Image, ImageDraw, ImageFont\nimport math\nimport platform\nimport subprocess\n\n\ndef polygon(brackets, name, choice):\n # brackets = \"(()())\"\n l = 300 # длина ребра\n p = 50 # отступ\n r = 40 # радиус круга\n outline = 3 # толщина круга\n set = 10 # отступ цифр\n font = ImageFont.truetype(\"arial.ttf\", 40)\n n = len(brackets) / 2 + 2 # кол-во вершин\n R = l / (2 * math.sin(math.pi / n))\n img = Image.new('RGB', (int(R + p) * 2 + 1, int(R + p) * 2 + 1), (255, 255, 255))\n draw = ImageDraw.Draw(img)\n center = [R + p, R + p]\n coord = []\n for i in range(0, int(n)):\n alph = 2 * math.pi * i / n - math.pi / n # угол поворота\n row = [R + p + R * math.sin(alph), R + p + R * math.cos(alph)]\n coord.append(row)\n draw.line((coord[i - 1][0], coord[i - 1][1], coord[i][0], coord[i][1]), fill=0, width=4)\n draw.line((coord[0][0], coord[0][1], coord[i][0], coord[i][1]), fill=0, width=4)\n draw.line((coord[0][0], coord[0][1], coord[1][0], coord[1][1]), fill=127, width=5)\n partition(draw, coord, brackets, int(n), 0, 1)\n for i in range(0, int(n)):\n draw.ellipse((coord[i][0] - r - outline, coord[i][1] - r - outline, coord[i][0] + r + outline, coord[i][1] + r + outline), fill='black')\n draw.ellipse((coord[i][0] - r, coord[i][1] - r, coord[i][0] + r, coord[i][1] + r), fill='white')\n if choice:\n if (i + 1) > 9:\n set = 23\n draw.text((coord[i][0] - set, coord[i][1] - 20), '%d' % (i + 1), fill=\"black\", font=font)\n del draw\n img = img.resize((int(R + p) + 1, int(R + p) + 1), Image.ANTIALIAS)\n img.save(name, subsampling=0, quality=100)\n return 0\n\n\ndef get(brackets):\n i = j = 0\n while True:\n if brackets[i] != 0:\n if brackets[i] == '(':\n j += 1\n else:\n j -= 1\n i += 1\n if j == 0:\n break\n return i # кол-во скобок внутри левой части\n\n\ndef partition(draw, coord, brackets, n, a, b):\n left = int(get(brackets) / 2 + 1)\n right = int(n - left + 1)\n if left > 2:\n draw.line((coord[a][0], coord[a][1], coord[b + right - 1][0], coord[b + right - 1][1]), fill=0, width=3)\n br = brackets[1: 2 * (left - 2) + 1]\n partition(draw, coord, br, left, a, (b + right - 1))\n if right > 2:\n draw.line((coord[b][0], coord[b][1], coord[b + right - 1][0], coord[b + right - 1][1]), fill=0, width=3)\n br = brackets[2 * (left - 2) + 2:]\n if b == 0 or b > (b + right - 1):\n partition(draw, coord, br, right, b, (b + right - 1))\n else:\n partition(draw, coord, br, right, (b + right - 1), b)\n pass\n\n\ndef check(brackets):\n if len(brackets) > 80:\n return 4\n j = 0\n for i in range(len(brackets)):\n if brackets[i] == '(':\n j += 1\n elif brackets[i] == ')':\n j -= 1\n else:\n return 2\n if j < 0:\n return 1\n if j != 0:\n return 1\n return 0\n\n\ndef callPolygon(brackets, name, numbering):\n if platform.system() == 'Linux':\n return polygon(brackets, name, numbering)\n else:\n return polygon(brackets, 'Calculate/' + name, numbering)\n\n\ndef callBinTree(brackets, name, numbering):\n if numbering:\n if platform.system() == 'Linux':\n return subprocess.call(['./Cat_Br_Tr_Num.o', brackets, name])\n else:\n return subprocess.call(['Cat_Br_Tr_Num.exe', brackets, 'Calculate/' + name])\n else:\n if platform.system() == 'Linux':\n return subprocess.call(['./Cat_Br_Tr.o', brackets, name])\n else:\n return 
subprocess.call(['Cat_Br_Tr.exe', brackets, 'Calculate/' + name])\n\n\ndef callRootTree(brackets, name, numbering):\n if numbering:\n if platform.system() == 'Linux':\n return subprocess.call(['./Cat_Tree_Win_Num.o', brackets, name])\n else:\n return subprocess.call(['Cat_Tree_Win_Num.exe', brackets, 'Calculate/' + name])\n else:\n if platform.system() == 'Linux':\n return subprocess.call(['./Cat_Tree_Win.o', brackets, name])\n else:\n return subprocess.call(['Cat_Tree_Win.exe', brackets, 'Calculate/' + name])\n\n\ndef callTableJung(brackets, name):\n if platform.system() == 'Linux':\n return subprocess.call(['./Cat_Jung.o', brackets, name])\n else:\n return subprocess.call(['Cat_Jung.exe', brackets, 'Calculate/' + name])\n","repo_name":"goo-goo-goo-joob/Catalan-Number","sub_path":"Calculate/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"72187669178","text":"def largest_product(arr):\n \"\"\"\n Write a function which takes a 2D array and returns the largest product of\n 2 adjacent values. first function runs the other two through it.\n \"\"\"\n largest = 0\n if len(arr) == 0:\n return largest\n # if len(arr) == 1:\n # return largest_product_array(arr[0], largest)\n # for i in range(len(arr) - 1):\n # largest = largest_product_array(arr[i], largest)\n # largest = adjacent_products([i]), length)\n # return largest\n\n def largest_product_array(arr, largest):\n \"\"\" tests largest against inner arrays on first level \"\"\"\n temp_largest = arr[0] * arr[1] > largest\n if temp_largest > largest:\n return temp_largest\n return largest\n\n def adjacent_products(arr1, arr2, largest):\n \"\"\"\" tests next level down arrays againts largest \"\"\"\n temp1 = arr1[0] * arr2[0]\n temp2 = arr1[1] * arr2[1]\n if temp1 > largest and temp1 > temp2:\n return temp1\n elif temp2 > largest and temp2 > temp1:\n return temp2\n return largest\n\n # largest = 0\n # if len(arr) == 0:\n # return largest\n if len(arr) == 1:\n return largest_product_array(arr[0], largest)\n for i in range(len(arr) - 1):\n largest = largest_product_array(arr[i], largest)\n largest = adjacent_products([i], length)\n return largest\n","repo_name":"jayadams011/data-structures-and-algorithms","sub_path":"challenges/largest_product_array/largest_product.py","file_name":"largest_product.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"72531750140","text":"import source.ResultHandler.PartyRegionResults as prr\nimport source.ResultHandler.TotalPartyResults as tpr\nimport source.ResultHandler.Polling as Polling\nfrom source.ResultHandler.Classes import Results, PollingInfo, Poll\nimport random\n\ndef getresults(scenario):\n results=Results(scenario)\n results.partyregionresults=prr.main(scenario)\n results.totalpartyresults=tpr.gettotalresults(scenario, results.partyregionresults)\n\n #results.print()\n return results\n\ndef makepoll(gamedata):\n poll=Poll(gamedata.scenario, gamedata.scenario.main.currentdate-gamedata.scenario.main.turnlength*random.uniform(0,1)) #randomizes date inbetween interval of previous turn and current one\n poll=Polling.getnewpoll(gamedata, poll)\n return poll\n\ndef getpolling(gamedata, polling=None, count=1):\n if polling==None:\n polling=PollingInfo(gamedata.scenario, None, [])\n\n for i in range(count):\n poll=Poll(gamedata.scenario, 
gamedata.scenario.main.currentdate-gamedata.scenario.main.turnlength*random.uniform(0,1)) #randomizes date inbetween interval of previous turn and current one\n polling.polls.append(Polling.getnewpoll(gamedata, poll))\n\n polling.aggregated=Polling.aggregatepolls(gamedata, polling)\n\n #Sort polls by current winner\n regionsort=[str(i.party.fullname+\"-\"+i.region.name) for i in polling.aggregated.partyregionresults]\n totalsort=[str(i.party.fullname) for i in polling.aggregated.totalpartyresults]\n \n for i in polling.polls:\n i.partyregionresults=sorted(i.partyregionresults, key=lambda x: regionsort.index(str(x.party.fullname+\"-\"+x.region.name)))\n i.totalpartyresults=sorted(i.totalpartyresults, key=lambda x: totalsort.index(str(x.party.fullname)))\n\n return polling\n\ndef getelection(gamedata, turns):\n \n\n return None","repo_name":"AuriTheGreat/McGovern","sub_path":"source/ResultHandler/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"9890011292","text":"from re import I\nimport time\nimport crypten\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport logging\n\n\n#logging.basicConfig(filename='example.log',level=logging.INFO)\n#logging.getLogger('matplotlib').setLevel(logging.INFO)\n\ncrypten.init()\n\n\ndef time_measure(func):\n def wrapper():\n time_start = time.time()\n func()\n time_m = time.time()-time_start\n return time_m\n return wrapper\n\n@crypten.mpc.run_multiprocess(2)\n@time_measure\ndef foo():\n ct1 = crypten.cryptensor([[0.01]],src=0)\n ct2 = crypten.cryptensor([[0.01]],src=1)\n\n ct3 = ct1*ct2\n\ndef average_time(l):\n av=[0 for i in range(len(l[0]))]\n print(l)\n av = np.array(av, dtype=float)\n for i in l:\n av+= np.array(i)\n return av/len(l)\n\ndef run_exp(steps):\n time_l= []\n bits = [i for i in range(2,40,1)]\n for _ in range(steps):\n time_l.append([])\n for b in bits:\n crypten.config.cfg.encoder.precision_bits=b\n t = foo()\n time_l[_].append(max(t)) \n print(_)\n return time_l\n \n \nres=average_time(run_exp(20))\nprint(res)\nplt.plot([i for i in range(2,40,1)], res)\nplt.show()\n\n# standard dev\n# weghts 10-2","repo_name":"yavuzkakin/privacy-preserving-SNN","sub_path":"time/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"19826392640","text":"import numpy as np\r\n\r\n\r\nenvironment_rows = 10\r\nenvironment_columns = 10\r\nq_values = np.zeros((environment_rows, environment_columns, 4))\r\n\r\n\r\nactions = ['up', 'right', 'down', 'left']\r\n\r\n\r\nrewards = np.full((environment_rows, environment_columns), -1.)\r\n\r\nrewards[9, 7] = 100.\r\nrewards[9, 8] = 200.\r\nrewards[9, 9] = 300.\r\n\r\nrewards[5, 3] = -1000.\r\nrewards[5, 4] = -1000.\r\nrewards[5, 5] = -1000.\r\nrewards[5, 6] = -1000.\r\n \r\nfor row in rewards:\r\n print(row)\r\n\r\n\r\ndef isTerminalState(current_row_index, current_column_index):\r\n if rewards[current_row_index, current_column_index] == -1.:\r\n return False\r\n else:\r\n return True\r\n\r\ndef getStartingLocation():\r\n current_row_index = np.random.randint(environment_rows)\r\n current_column_index = np.random.randint(environment_columns)\r\n while isTerminalState(current_row_index, current_column_index):\r\n current_row_index = np.random.randint(environment_rows)\r\n current_column_index = np.random.randint(environment_columns)\r\n return 
current_row_index, current_column_index\r\n\r\ndef getNextAction(current_row_index, current_column_index, epsilon):\r\n if np.random.random() < epsilon:\r\n return np.argmax(q_values[current_row_index, current_column_index])\r\n else:\r\n return np.random.randint(4)\r\n\r\ndef getNextLocation(current_row_index, current_column_index, action_index):\r\n new_row_index = current_row_index\r\n new_column_index = current_column_index\r\n if actions[action_index] == 'up' and current_row_index > 0:\r\n new_row_index -= 1\r\n elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:\r\n new_column_index += 1\r\n elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:\r\n new_row_index += 1\r\n elif actions[action_index] == 'left' and current_column_index > 0:\r\n new_column_index -= 1\r\n return new_row_index, new_column_index\r\n\r\ndef getShortestPath(start_row_index, start_column_index):\r\n if isTerminalState(start_row_index, start_column_index):\r\n return []\r\n else:\r\n current_row_index, current_column_index = start_row_index, start_column_index\r\n shortest_path = []\r\n shortest_path.append([current_row_index, current_column_index])\r\n while not isTerminalState(current_row_index, current_column_index):\r\n action_index = getNextAction(current_row_index, current_column_index, 1.)\r\n current_row_index, current_column_index = getNextLocation(current_row_index, current_column_index, action_index)\r\n shortest_path.append([current_row_index, current_column_index])\r\n return shortest_path\r\n\r\n###\r\n\r\nepsilon = 0.4\r\ndiscount = 0.9\r\nlearning_rate = 0.9\r\n\r\nfor episode in range(1000):\r\n row_index, column_index = getStartingLocation()\r\n\r\n while not isTerminalState(row_index, column_index):\r\n action_index = getNextAction(row_index, column_index, epsilon)\r\n\r\n old_row_index, old_column_index = row_index, column_index\r\n row_index, column_index = getNextLocation(row_index, column_index, action_index)\r\n \r\n reward = rewards[row_index, column_index]\r\n old_q_value = q_values[old_row_index, old_column_index, action_index]\r\n temporal_difference = reward + (discount * np.max(q_values[row_index, column_index])) - old_q_value\r\n\r\n new_q_value = old_q_value + (learning_rate * temporal_difference)\r\n q_values[old_row_index, old_column_index, action_index] = new_q_value\r\n\r\nprint('Training complete!')\r\n\r\n###\r\n\r\nx = getShortestPath(0, 0)\r\nprint(getShortestPath(0, 0))\r\n\r\n","repo_name":"DjMitelman/Tiago","sub_path":"qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"72689703420","text":"#!-*-coding:UTF-8-*-\nfrom django.urls import path\nfrom .import views\n#函数 path() 具有四个参数,两个必须参数:route 和 view,两个可选参数:kwargs 和 name\napp_name = 'polls' #为 URL 名称添加命名空间\n\"\"\"\nurlpatterns = [\n path('',views.index,name='index'),#path(‘’(链接),方法名,name=‘’),path的格式\n # path('nono/',views.nono,name='i'),#自己测试语句,\n path('/', views.detail, name='detail'),\n path('/results/',views.results,name='results'),#结果页\n path('/vote/',views.vote,name='vote'), #投票页\n]\n\"\"\"\n#优化1:导入通用试图\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('/', views.DetailView.as_view(), name='detail'),\n path('/results/', views.ResultsView.as_view(), name='results'),\n path('/vote/', views.vote, name='vote'),\n 
path('login/',views.login,name='login'),\n]","repo_name":"CanJam/mysecond","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"38476851282","text":"#! /usr/bin/env python\n\nfrom Tkinter import *\nimport sys, os\n\nclass Ball:\n def __init__(self, letter):\n self.root = Tk()\n self.canvas = Canvas(self.root, width=100, height=100)\n self.canvas.pack()\n\n self.root.bind('q', self.Exit)\n\n self.x, self.y = 50, 50\n self.vector = 1\n self.sz = 8\n\n self.ballID = self.canvas.create_oval(self.x-self.sz, self.y-self.sz,\n self.x+self.sz, self.y+self.sz,\n fill='orange',\n outline='orange')\n self.textID = self.canvas.create_text(self.x, self.y,\n anchor='c', text=letter)\n def Go(self):\n self.Animate()\n self.root.mainloop()\n def Exit(self, event):\n sys.exit(0)\n def Animate(self):\n self.x += self.vector\n if self.x > (100-self.sz) or self.x < self.sz:\n self.vector = -self.vector\n self.canvas.coords(self.ballID,\n self.x-self.sz, self.y-self.sz,\n self.x+self.sz, self.y+self.sz)\n self.canvas.coords(self.textID, self.x, self.y)\n self.root.after(20, self.Animate)\n\n# main\nb = Ball(sys.argv[1])\nb.Go()\n","repo_name":"M1c17/OP_three_easy_steps","sub_path":"lecture-notes.04.10.2018/01-Intro/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"35159865913","text":"\"\"\"\nURL configuration for depression_Helper project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include,re_path\nfrom Main_App import views\nfrom django.views.static import serve\nfrom django.conf import settings\nfrom django.contrib.auth import views as auth_views\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name=\"home\"),\n path(\"accounts/\", include(\"allauth.urls\")), #most important\n path('choosing', views.choosing, name=\"choosing\"),\n path(\"acc\", views.acc, name=\"acc\"), #most important\n path('google_bard_response/', views.google_bard_response),\n\n\n path('doctor-signup', views.doctor_signup_view, name=\"doctor-signup\"),\n path('patient-signup', views.patient_signup_view, name=\"patient-signup\"),\n\n path('feeling', views.login_face, name=\"feeling\"),\n path('login',views.login,name=\"login\"),\n path('doctormain',views.doctormain,name=\"doctormain\"),\n path('doctorpatient',views.doctorpatient,name=\"doctorpatient\"),\n\n path('patientmain',views.patientmain,name=\"patientmain\"),\n \npath('patientchat',views.patientchat,name=\"patientchat\"),\n path('signup',views.signup,name=\"signup\"),\npath('chat', views.chat, name=\"chat\"),\npath('data', views.data, name=\"data\"),\n path('logout/',auth_views.LogoutView.as_view(),name='logout'),\n path('comingsoon', views.comingsoon,name='comingsoon'),\n\n path('start-exam/', views.start_exam_view,name='start-exam'),\n path('sendlogin/', views.recieve_login_face),\n path('calendar/', views.CalendarView.as_view(), name='calendar'),\n path('patient-calendar/', views.PatientCalendarView.as_view(), name='calendar'),\n path('paunaccept-calendar/', views.PaUnAccept.as_view(), name='paunaccept-calendar'),\n path('unaccept-calendar/', views.UnCalendarView.as_view(), name='unaccept-calendar'),\n\n\n path('paevent/new/', views.paevent, name='paevent_new'),\n path('paevent/edit//', views.paevent, name='paevent_edit'),\n\n path('patient-doctor/',views.patient_view_doctor,name=\"patient-doctor\"),\n path('event/new/', views.event, name='event_new'),\n path('event/edit//', views.event, name='event_edit'),\n\n path('thankyou/', views.marks,name='thankyou'),\n\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"vietmrvu/flatter","sub_path":"depression_Helper/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"73987378300","text":"from __future__ import annotations\n\nimport json\nimport logging\nfrom pathlib import Path\n\nimport click\n\nfrom . 
import __version__\nfrom .constants import *\nfrom .downloader import Downloader\n\nEXCLUDED_PARAMS = (\n \"urls\",\n \"config_location\",\n \"url_txt\",\n \"no_config_file\",\n \"version\",\n \"help\",\n)\n\n\ndef write_default_config_file(ctx: click.Context):\n ctx.params[\"config_location\"].parent.mkdir(parents=True, exist_ok=True)\n config_file = {\n param.name: param.default\n for param in ctx.command.params\n if param.name not in EXCLUDED_PARAMS\n }\n with open(ctx.params[\"config_location\"], \"w\") as f:\n f.write(json.dumps(config_file, indent=4))\n\n\ndef no_config_callback(\n ctx: click.Context, param: click.Parameter, no_config_file: bool\n):\n if no_config_file:\n return ctx\n if not ctx.params[\"config_location\"].exists():\n write_default_config_file(ctx)\n with open(ctx.params[\"config_location\"], \"r\") as f:\n config_file = dict(json.load(f))\n for param in ctx.command.params:\n if (\n config_file.get(param.name) is not None\n and not ctx.get_parameter_source(param.name)\n == click.core.ParameterSource.COMMANDLINE\n ):\n ctx.params[param.name] = param.type_cast_value(ctx, config_file[param.name])\n return ctx\n\n\n@click.command()\n@click.argument(\n \"urls\",\n nargs=-1,\n type=str,\n required=True,\n)\n@click.option(\n \"--final-path\",\n \"-f\",\n type=Path,\n default=\"./Spotify\",\n help=\"Path where the downloaded files will be saved.\",\n)\n@click.option(\n \"--temp-path\",\n \"-t\",\n type=Path,\n default=\"./temp\",\n help=\"Path where the temporary files will be saved.\",\n)\n@click.option(\n \"--cookies-location\",\n \"-c\",\n type=Path,\n default=\"./cookies.txt\",\n help=\"Location of the cookies file.\",\n)\n@click.option(\n \"--wvd-location\",\n \"-w\",\n type=Path,\n default=\"./device.wvd\",\n help=\"Location of the .wvd file.\",\n)\n@click.option(\n \"--config-location\",\n type=Path,\n default=Path.home() / \".spotify-aac-downloader\" / \"config.json\",\n help=\"Location of the config file.\",\n)\n@click.option(\n \"--ffmpeg-location\",\n type=str,\n default=\"ffmpeg\",\n help=\"Location of the FFmpeg binary.\",\n)\n@click.option(\n \"--aria2c-location\",\n type=str,\n default=\"aria2c\",\n help=\"Location of the aria2c binary.\",\n)\n@click.option(\n \"--template-folder-album\",\n type=str,\n default=\"{album_artist}/{album}\",\n help=\"Template of the album folders as a format string.\",\n)\n@click.option(\n \"--template-folder-compilation\",\n type=str,\n default=\"Compilations/{album}\",\n help=\"Template of the compilation album folders as a format string.\",\n)\n@click.option(\n \"--template-file-single-disc\",\n type=str,\n default=\"{track:02d} {title}\",\n help=\"Template of the song files for single-disc albums as a format string.\",\n)\n@click.option(\n \"--template-file-multi-disc\",\n type=str,\n default=\"{disc}-{track:02d} {title}\",\n help=\"Template of the song files for multi-disc albums as a format string.\",\n)\n@click.option(\n \"--download-mode\",\n type=click.Choice([\"native\", \"aria2c\"]),\n default=\"native\",\n help=\"Download mode.\",\n)\n@click.option(\n \"--exclude-tags\",\n \"-e\",\n type=str,\n default=None,\n help=\"List of tags to exclude from file tagging separated by commas.\",\n)\n@click.option(\n \"--truncate\",\n type=int,\n default=40,\n help=\"Maximum length of the file/folder names.\",\n)\n@click.option(\n \"--log-level\",\n \"-l\",\n type=click.Choice([\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]),\n default=\"INFO\",\n help=\"Log level.\",\n)\n@click.option(\n \"--premium-quality\",\n 
\"-p\",\n is_flag=True,\n help=\"Download in 256kbps AAC instead of 128kbps AAC.\",\n)\n@click.option(\n \"--lrc-only\",\n \"-l\",\n is_flag=True,\n help=\"Download only the synced lyrics.\",\n)\n@click.option(\n \"--no-lrc\",\n \"-n\",\n is_flag=True,\n help=\"Don't download the synced lyrics.\",\n)\n@click.option(\n \"--save-cover\",\n \"-s\",\n is_flag=True,\n help=\"Save cover as a separate file.\",\n)\n@click.option(\n \"--overwrite\",\n \"-o\",\n is_flag=True,\n help=\"Overwrite existing files.\",\n)\n@click.option(\n \"--print-exceptions\",\n is_flag=True,\n help=\"Print exceptions.\",\n)\n@click.option(\n \"--url-txt\",\n \"-u\",\n is_flag=True,\n help=\"Read URLs as location of text files containing URLs.\",\n)\n@click.option(\n \"--no-config-file\",\n \"-n\",\n is_flag=True,\n callback=no_config_callback,\n help=\"Don't use the config file.\",\n)\n@click.version_option(__version__, \"-v\", \"--version\")\n@click.help_option(\"-h\", \"--help\")\ndef main(\n urls: tuple[str],\n final_path: Path,\n temp_path: Path,\n cookies_location: Path,\n wvd_location: Path,\n config_location: Path,\n ffmpeg_location: str,\n aria2c_location: str,\n template_folder_album: str,\n template_folder_compilation: str,\n template_file_single_disc: str,\n template_file_multi_disc: str,\n download_mode: str,\n exclude_tags: str,\n truncate: int,\n log_level: str,\n premium_quality: bool,\n lrc_only: bool,\n no_lrc: bool,\n save_cover: bool,\n overwrite: bool,\n print_exceptions: bool,\n url_txt: bool,\n no_config_file: bool,\n):\n logging.basicConfig(\n format=\"[%(levelname)-8s %(asctime)s] %(message)s\",\n datefmt=\"%H:%M:%S\",\n )\n logger = logging.getLogger(__name__)\n logger.setLevel(log_level)\n logger.debug(\"Starting downloader\")\n downloader = Downloader(**locals())\n if not downloader.ffmpeg_location:\n logger.critical(X_NOT_FOUND_STRING.format(\"FFmpeg\", ffmpeg_location))\n return\n if download_mode == \"aria2c\" and not downloader.aria2c_location:\n logger.critical(X_NOT_FOUND_STRING.format(\"aria2c\", aria2c_location))\n return\n if cookies_location is not None and not cookies_location.exists():\n logger.critical(X_NOT_FOUND_STRING.format(\"Cookies\", cookies_location))\n return\n if not wvd_location.exists() and not lrc_only:\n logger.critical(X_NOT_FOUND_STRING.format(\".wvd file\", wvd_location))\n return\n if url_txt:\n logger.debug(\"Reading URLs from text files\")\n _urls = []\n for queue_item in urls:\n with open(queue_item, \"r\") as f:\n _urls.extend(f.read().splitlines())\n urls = tuple(_urls)\n if not lrc_only:\n if not wvd_location.exists():\n logger.critical(X_NOT_FOUND_STRING.format(\".wvd file\", wvd_location))\n return\n logger.debug(\"Setting up CDM\")\n downloader.setup_cdm()\n logger.debug(\"Setting up session\")\n downloader.setup_session()\n if premium_quality and downloader.is_premium == \"false\":\n logger.critical(\"Cannot download in premium quality with a free account\")\n return\n download_queue = []\n error_count = 0\n for url_index, url in enumerate(urls, start=1):\n current_url = f\"URL {url_index}/{len(urls)}\"\n try:\n logger.debug(f'({current_url}) Checking \"{url}\"')\n download_queue.append(downloader.get_download_queue(url))\n except Exception:\n error_count += 1\n logger.error(\n f'({current_url}) Failed to check \"{url}\"',\n exc_info=print_exceptions,\n )\n for queue_item_index, queue_item in enumerate(download_queue, start=1):\n for track_index, track in enumerate(queue_item, start=1):\n current_track = f\"Track {track_index}/{len(queue_item)} from 
URL {queue_item_index}/{len(download_queue)}\"\n try:\n logger.info(f'({current_track}) Downloading \"{track[\"name\"]}\"')\n track_id = track[\"id\"]\n logger.debug(f\"Getting metadata\")\n gid = downloader.uri_to_gid(track_id)\n metadata = downloader.get_metadata(gid)\n logger.debug(f\"Getting lyrics\")\n lyrics_unsynced, lyrics_synced = downloader.get_lyrics(\n track_id, metadata[\"has_lyrics\"]\n )\n tags = downloader.get_tags(metadata, lyrics_unsynced)\n final_location = downloader.get_final_location(tags)\n lrc_location = downloader.get_lrc_location(final_location)\n cover_location = downloader.get_cover_location(final_location)\n cover_url = downloader.get_cover_url(metadata)\n if lrc_only:\n pass\n elif final_location.exists() and not overwrite:\n logger.warning(\n f'({current_track}) Track already exists at \"{final_location}\", skipping'\n )\n else:\n logger.debug(f\"Getting file info\")\n file_id = downloader.get_file_id(metadata)\n if not file_id:\n logger.error(\n f\"({current_track}) Track not available on Spotify's \"\n \"servers and no alternative found, skipping\"\n )\n continue\n logger.debug(f\"Getting PSSH\")\n pssh = downloader.get_pssh(file_id)\n logger.debug(f\"Getting decryption key\")\n decryption_key = downloader.get_decryption_key(pssh)\n logger.debug(f\"Getting stream URL\")\n stream_url = downloader.get_stream_url(file_id)\n encrypted_location = downloader.get_encrypted_location(track_id)\n logger.debug(f'Downloading to \"{encrypted_location}\"')\n if download_mode == \"native\":\n downloader.download_native(encrypted_location, stream_url)\n if download_mode == \"aria2c\":\n downloader.download_aria2c(encrypted_location, stream_url)\n fixed_location = downloader.get_fixed_location(track_id)\n logger.debug(f'Remuxing to \"{fixed_location}\"')\n downloader.fixup(decryption_key, encrypted_location, fixed_location)\n logger.debug(f\"Applying tags\")\n downloader.apply_tags(fixed_location, tags, cover_url)\n logger.debug(f'Moving to \"{final_location}\"')\n downloader.move_to_final_location(fixed_location, final_location)\n if no_lrc or not lyrics_synced:\n pass\n elif lrc_location.exists() and not overwrite:\n logger.debug(\n f'Synced lyrics already exists at \"{lrc_location}\", skipping'\n )\n else:\n logger.debug(f'Saving synced lyrics to \"{lrc_location}\"')\n downloader.save_lrc(lrc_location, lyrics_synced)\n if lrc_only or not save_cover:\n pass\n elif cover_location.exists() and not overwrite:\n logger.debug(\n f'Cover already exists at \"{cover_location}\", skipping'\n )\n else:\n logger.debug(f'Saving cover to \"{cover_location}\"')\n downloader.save_cover(cover_location, cover_url)\n except Exception:\n error_count += 1\n logger.error(\n f'({current_track}) Failed to download \"{track[\"name\"]}\"',\n exc_info=print_exceptions,\n )\n finally:\n if temp_path.exists():\n logger.debug(f'Cleaning up \"{temp_path}\"')\n downloader.cleanup_temp_path()\n logger.info(f\"Done ({error_count} error(s))\")\n","repo_name":"glomatico/spotify-aac-downloader","sub_path":"spotify_aac_downloader/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":11832,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"23"} +{"seq_id":"21637082663","text":"\nfrom operator import index\n\n\ndef bubble(listA):\n indexing_length = len(listA) - 1\n sorted = False\n\n while not sorted:\n sorted = True\n\n for i in range(0, indexing_length):\n if listA[i] > listA[i+1]:\n sorted = False\n listA[i], listA[i+1] = listA[i+1], listA[i]\n return 
listA\n\nprint(bubble([4,6,8,3,2,5,7,8,9]))","repo_name":"mawais54013/Python-Sorting","sub_path":"bubble-sort.py","file_name":"bubble-sort.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"69869452860","text":"import os\nimport pyjson5\nimport shutil\nfrom pathlib import Path\nimport pytest\n\nfrom fbmr.conditions import SubimageCondition\nfrom fbmr.config import Config, Action\nfrom fbmr.effects import ClickSubimageEffect\n\nTESTDATA_COND = \"tests/test_data_conditions/\"\nTESTDATA_CONFIG = \"tests/test_data_config/\"\n\n\n@pytest.fixture\ndef setup_and_teardown():\n \"\"\"Setup and teardown code.\"\"\"\n nuke_test_folder()\n yield\n nuke_test_folder()\n\n\ndef nuke_test_folder():\n if os.path.exists(TESTDATA_CONFIG):\n shutil.rmtree(TESTDATA_CONFIG)\n Path(TESTDATA_CONFIG).mkdir(parents=True, exist_ok=True)\n\n\ndef test_create(setup_and_teardown):\n config = Config(TESTDATA_CONFIG, \"test1\", create_if_missing=True)\n action = Action(\"empty\", [], [], True, [], 0, None, TESTDATA_CONFIG + \"test2\")\n config.add_action(action)\n assert config.make_json() == {'name': 'test1', 'actions': [\n {'name': 'empty', 'cooldown': 0, 'conditions': [], 'effects': [], 'is_enabled': True, 'next_action_names': []}],\n 'confirmAll': False, 'screenshot_size': None}\n\n # Read\n config2 = Config(TESTDATA_CONFIG, \"test1\")\n assert config2.make_json() == config.make_json()\n\n\ndef test_create2(setup_and_teardown):\n config = Config(TESTDATA_CONFIG, \"test2\", create_if_missing=True)\n condition = SubimageCondition(\n TESTDATA_COND + \"button.png\",\n (603, 914, 603 + 503, 914 + 346),\n 80,\n 1.0,\n \"button\"\n )\n effect = ClickSubimageEffect(\n TESTDATA_COND + \"button.png\",\n (603, 914, 603 + 503, 914 + 346),\n [1, 2]\n )\n\n action = Action(\"action1\", [condition], [effect], True, [], 0, None, TESTDATA_CONFIG + \"test2\")\n config.add_action(action)\n assert config.make_json() == {\n 'name': 'test2',\n 'actions': [{\n 'name': 'action1',\n 'conditions': [{\n 'type': 'SubimageCondition',\n 'image_path': f'{TESTDATA_COND}button.png',\n 'intended_region': (603, 914, 1106, 1260),\n 'threshold': 80,\n 'weight': 1.0,\n 'save_region_as': 'button'}],\n 'cooldown': 0,\n 'effects': [{\n 'type': 'ClickSubimageEffect',\n 'image_path': f'{TESTDATA_COND}button.png',\n 'intended_region': (603, 914, 1106, 1260),\n 'tap_coords_in_image': [1, 2],\n }],\n 'is_enabled': True,\n 'next_action_names': []},\n ],\n 'screenshot_size': None,\n 'confirmAll': False}\n\n # Read\n config2 = Config(TESTDATA_CONFIG, \"test2\")\n assert pyjson5.dumps(config2.make_json()) == pyjson5.dumps(\n config.make_json())\n\n # Add 2nd action\n action = Action(\"action2\", [condition], [effect], True, [], 0, None, TESTDATA_CONFIG + \"test2\")\n config.add_action(action)\n\n # Read\n config2 = Config(TESTDATA_CONFIG, \"test2\")\n assert len(config2.make_json()['actions']) == 2\n action_names = sorted([aj['name'] for aj in config2.make_json()['actions']])\n assert action_names == ['action1', 'action2']\n\n","repo_name":"alac/flashbackmacrorecorder","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"37018233666","text":"from turtle import *\n\nspeed(1)\n\n\ndef square():\n fillcolor('green')\n begin_fill()\n for _ in range(4):\n forward(200)\n right(90)\n end_fill()\n\n\ndef 
triangle():\n fillcolor('red')\n begin_fill()\n goto(220, 0)\n goto(100, 150)\n goto(-20, 0)\n goto(0, 0)\n end_fill()\n\n\nsquare()\ntriangle()\n","repo_name":"dimkamass/pythonProject","sub_path":"Turtle_module/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"1850152824","text":"import os\r\nfrom multiprocessing import Pool\r\nfrom math import ceil\r\n\r\ntry:\r\n import cv2\r\n import numpy as np\r\nexcept ImportError:\r\n print(\"You don't seem to have OpenCV or numpy installed. Please check and try again.\")\r\n exit()\r\n\r\nRGB = {\"-\": 0x1A2A3A, # Wall\r\n \"*\": 0xAAAAAA, # Non-Visited Tile\r\n \"@\": 0x6699FF, # Visited Tile\r\n \"!\": 0xFF6666, # Final Path\r\n \"#\": 0xFFFF66, # Start Tile\r\n \"$\": 0x66FF99, # End Tile\r\n \"?\": 0xDDDDDD # Grid Border\r\n }\r\n\r\ndef __BGR(h): return ((h&0xFF, (h&0xFF00)>>8, (h&0xFF0000)>>16))\r\n\r\ndef __createFolder(foldername):\r\n try:\r\n os.mkdir(foldername)\r\n except:\r\n for file in os.listdir(foldername):\r\n filepath = os.path.join(foldername, file)\r\n if os.path.isfile(filepath) and filepath.endswith(\".png\"):\r\n os.unlink(filepath)\r\n\r\ndef __generateVideo(filename, framerate):\r\n directory = None\r\n\r\n # If file in inside a folder, move to it\r\n if \"/\" in filename:\r\n directory, filename = filename.split(\"/\")\r\n os.chdir(directory)\r\n if \"\\\\\" in filename:\r\n directory, filename = filename.split(\"\\\\\")\r\n os.chdir(directory)\r\n\r\n print(\"Writing to '{}.mp4'...\".format(filename))\r\n\r\n # Generate video file\r\n os.chdir(filename)\r\n os.system(\"ffmpeg -y -hide_banner -loglevel warning -framerate {} -i %d.png -c:v libx264 -pix_fmt yuv420p ../{}.mp4\".format(framerate, filename))\r\n os.chdir(\"..\")\r\n\r\n # Remove frames folder\r\n for file in os.listdir(filename):\r\n filepath = os.path.join(filename, file)\r\n if os.path.isfile(filepath) and filepath.endswith(\".png\"):\r\n os.unlink(filepath)\r\n os.rmdir(filename)\r\n\r\n # If file was inside a folder, move out of it\r\n if directory:\r\n os.chdir(\"..\")\r\n\r\ndef __generateFrame(i, filename, solf, w, h, M):\r\n # Convert to pixels\r\n pixels = np.array([[__BGR(RGB[solf[i][j]]) for j in range(h)] for i in range(w)])\r\n\r\n # Save as image\r\n filepath = os.path.join(\"{}\".format(filename), \"{}.png\".format(i))\r\n cv2.imwrite(filepath, cv2.resize(pixels, (w*M, h*M), interpolation=cv2.INTER_NEAREST))\r\n\r\ndef __generateGridFrame(i, filename, solTLf, solTRf, solBLf, solBRf, w, h, B, W, H, M):\r\n # Get every solution\r\n tl = np.array(solTLf)\r\n tr = np.array(solTRf)\r\n bl = np.array(solBLf)\r\n br = np.array(solBRf)\r\n\r\n # Join all four in a 2x2 grid\r\n all4 = np.full((W, H), \"?\")\r\n all4[B:w+B, B:h+B] = tl\r\n all4[B:w+B, h+B+B:-B] = tr\r\n all4[w+B+B:-B, B:h+B] = bl\r\n all4[w+B+B:-B, h+B+B:-B] = br\r\n\r\n # Generate frame\r\n __generateFrame(i, filename, all4, W, H, M)\r\n\r\ndef animate(sol, filename, length=15):\r\n print(\"Animating '{}'...\".format(filename))\r\n\r\n # Solution res\r\n w, h = len(sol.frames[0]), len(sol.frames[0][0])\r\n\r\n # Max res of resulting image\r\n maxResW, maxResH = 1080, 1080\r\n M = min(maxResW//w, maxResH//h)\r\n\r\n # Amount of frames needed to reproduce the path\r\n duration = len(sol.frames)\r\n\r\n # Create (or empty) folder\r\n __createFolder(filename)\r\n\r\n # Maximum amount of frames to generate\r\n maxFrames = 3000\r\n\r\n print(\"Generating {} 
frames...\".format(duration-1))\r\n\r\n # Prepare frames\r\n frames = []\r\n for i in range(1, duration):\r\n if i != 1 and i % (duration//min(maxFrames, duration)) != 0 and i != duration-1: continue\r\n frames.append((i, filename, sol.frames[i], w, h, M))\r\n\r\n # Generate frames (in parallel)\r\n pool = Pool()\r\n pool.starmap(__generateFrame, frames)\r\n pool.close()\r\n pool.join()\r\n\r\n # Set video framerate\r\n framerate = ceil(min(maxFrames, duration)/length)\r\n\r\n # Generate video and remove frames folder\r\n __generateVideo(filename, framerate)\r\n\r\ndef animateGrid(solTL, solTR, solBL, solBR, filename, length=15):\r\n print(\"Animating '{}'...\".format(filename))\r\n\r\n # Assert every solution has the same res\r\n assert(len(solTL.frames[0]) == len(solTR.frames[0]) == len(solBL.frames[0]) == len(solBR.frames[0]))\r\n assert(len(solTL.frames[0][0]) == len(solTR.frames[0][0]) == len(solBL.frames[0][0]) == len(solBR.frames[0][0]))\r\n\r\n # Solution res\r\n w, h = len(solTL.frames[0]), len(solTL.frames[0][0])\r\n\r\n # Size of borders\r\n B = max(1, min(w//50, h//50))\r\n\r\n # 2x2 grid res\r\n W = w*2 + 3*B\r\n H = h*2 + 3*B\r\n\r\n # Max res of resulting image\r\n maxResW, maxResH = 1080, 1080\r\n M = min(maxResW//W, maxResH//H)\r\n\r\n # Amount of frames needed to reproduce all paths\r\n duration = max(len(solTL.frames), len(solTR.frames), len(solBL.frames), len(solBR.frames))\r\n\r\n # Create (or empty) folder\r\n __createFolder(filename)\r\n\r\n # Maximum amount of frames to generate\r\n maxFrames = 3000\r\n\r\n print(\"Generating {} frames...\".format(duration-1))\r\n\r\n # Prepare frames\r\n frames = []\r\n for i in range(1, duration):\r\n if i != 1 and i % (duration//min(maxFrames, duration)) != 0 and i != duration-1: continue\r\n frames.append((i, filename,\r\n solTL.frames[i if i < len(solTL.frames) else -1],\r\n solTR.frames[i if i < len(solTR.frames) else -1],\r\n solBL.frames[i if i < len(solBL.frames) else -1],\r\n solBR.frames[i if i < len(solBR.frames) else -1],\r\n w, h, B, W, H, M))\r\n\r\n # Generate frames (in parallel)\r\n pool = Pool()\r\n pool.starmap(__generateGridFrame, frames)\r\n pool.close()\r\n pool.join()\r\n\r\n # Set video framerate\r\n framerate = ceil(min(maxFrames, duration)/length)\r\n\r\n # Generate video and remove frames folder\r\n __generateVideo(filename, framerate)","repo_name":"KanegaeGabriel/pathfinder","sub_path":"Animator.py","file_name":"Animator.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"34018426360","text":"import sys\n\ntry:\n import _thread\nexcept:\n print(\"1..0\")\n sys.exit(0)\n\nimport perl\nif not perl.MULTI_PERL:\n print(\"1..0\")\n sys.exit(0)\n\n# This tests behaviour of perl objects passed from one\n# thread (and perl interpreter) to the next one and that\n# it is still destructed properly.\n\nprint(\"1..5\")\n\nperl_obj = perl.eval(\"\"\"\n\nsub Foo::hello {\n return \"Hello\";\n}\n\nsub Foo::DESTROY\n{\n my $self = shift;\n print \"ok 2\\n\";\n}\n\nbless {}, \"Foo\";\n\n\"\"\")\n\n#print perl_obj.hello();\n#print perl_obj\n\ndef t1():\n global perl_obj\n try:\n perl_obj.hello()\n print(\"not \")\n except ValueError as v:\n print(\"ok 1\")\n #print v\n\n perl.eval(\"\"\"sub Foo::DESTROY { $|=1; print \"ok 4\\n\"; }\"\"\");\n\n perl_obj = perl.get_ref(\"@\")\n perl_obj.__class__ = \"Foo\";\n #print perl_obj\n print(\"ok 3\")\n sys.stdout.flush();\n\n_thread.start_new_thread(t1, ())\n\nimport 
time\ntime.sleep(2)\n#print perl_obj\nperl_obj = None\n\nprint(\"ok 5\")\n\n\n\n","repo_name":"nikicat/python-perlmodule","sub_path":"t/thr-svrv.py","file_name":"thr-svrv.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"23"}